_id
stringlengths 64
64
| repository
stringlengths 6
84
| name
stringlengths 4
110
| content
stringlengths 0
248k
| license
null | download_url
stringlengths 89
454
| language
stringclasses 7
values | comments
stringlengths 0
74.6k
| code
stringlengths 0
248k
|
---|---|---|---|---|---|---|---|---|
5b44934b1a26d32faf401640591beae652560ab34f2191eaa1d16bcca3f0f7d2 | launchdarkly/erlang-server-sdk | ldclient_rollout_randomization_consistency_SUITE.erl | -module(ldclient_rollout_randomization_consistency_SUITE).
-include_lib("common_test/include/ct.hrl").
%% ct functions
-export([
all/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2
]).
%% Tests
-export([
bucket_context_by_key/1,
bucket_context_with_seed/1
]).
%%====================================================================
%% ct functions
%%====================================================================
all() ->
[
bucket_context_by_key,
bucket_context_with_seed
].
init_per_suite(Config) ->
Config.
end_per_suite(_) ->
ok.
init_per_testcase(_, Config) ->
Config.
end_per_testcase(_, _) ->
ok.
%%====================================================================
%% Helpers
%%====================================================================
-spec fl_eq(float(), float()) -> nil.
fl_eq(L, R) ->
true = abs(L - R) < 0.0000001.
make_and_bucket(Seed, Key, Salty) ->
Point1 = ldclient_rollout:bucket_context(Seed, Key, Salty,
ldclient_context:new(<<"userKeyA">>), ldclient_attribute_reference:new(<<"key">>), <<"user">>),
Point2 = ldclient_rollout:bucket_context(Seed, Key, Salty,
ldclient_context:new(<<"userKeyB">>), ldclient_attribute_reference:new(<<"key">>), <<"user">>),
Point3 = ldclient_rollout:bucket_context(Seed, Key, Salty,
ldclient_context:new(<<"userKeyC">>), ldclient_attribute_reference:new(<<"key">>), <<"user">>),
{Point1, Point2, Point3}.
%%====================================================================
%% Tests
%%====================================================================
%% Note: These tests are meant to be exact duplicates of tests
%% in other SDKs. Do not change any of the values unless they
%% are also changed in other SDKs. These are not traditional behavioral
%% tests so much as consistency tests to guarantee that the implementation
%% is identical across SDKs.
bucket_context_by_key(_) ->
Seed = null,
Salty = <<"saltyA">>,
Key = <<"hashKey">>,
{Point1, Point2, Point3} = make_and_bucket(Seed, Key, Salty),
fl_eq(0.42157587, Point1),
fl_eq(0.6708485, Point2),
fl_eq(0.10343106, Point3).
bucket_context_with_seed(_) ->
Seed = 61,
Salty = <<"saltyA">>,
Key = <<"hashKey">>,
{Point1, Point2, Point3} = make_and_bucket(Seed, Key, Salty),
fl_eq(0.09801207, Point1),
fl_eq(0.14483777, Point2),
fl_eq(0.9242641, Point3).
| null | https://raw.githubusercontent.com/launchdarkly/erlang-server-sdk/d9a4442a8a214bf950dec8182b26cd042436f4c8/test/ldclient_rollout_randomization_consistency_SUITE.erl | erlang | ct functions
Tests
====================================================================
ct functions
====================================================================
====================================================================
Helpers
====================================================================
====================================================================
Tests
====================================================================
Note: These tests are meant to be exact duplicates of tests
in other SDKs. Do not change any of the values unless they
are also changed in other SDKs. These are not traditional behavioral
tests so much as consistency tests to guarantee that the implementation
is identical across SDKs. | -module(ldclient_rollout_randomization_consistency_SUITE).
-include_lib("common_test/include/ct.hrl").
-export([
all/0,
init_per_suite/1,
end_per_suite/1,
init_per_testcase/2,
end_per_testcase/2
]).
-export([
bucket_context_by_key/1,
bucket_context_with_seed/1
]).
all() ->
[
bucket_context_by_key,
bucket_context_with_seed
].
init_per_suite(Config) ->
Config.
end_per_suite(_) ->
ok.
init_per_testcase(_, Config) ->
Config.
end_per_testcase(_, _) ->
ok.
-spec fl_eq(float(), float()) -> nil.
fl_eq(L, R) ->
true = abs(L - R) < 0.0000001.
make_and_bucket(Seed, Key, Salty) ->
Point1 = ldclient_rollout:bucket_context(Seed, Key, Salty,
ldclient_context:new(<<"userKeyA">>), ldclient_attribute_reference:new(<<"key">>), <<"user">>),
Point2 = ldclient_rollout:bucket_context(Seed, Key, Salty,
ldclient_context:new(<<"userKeyB">>), ldclient_attribute_reference:new(<<"key">>), <<"user">>),
Point3 = ldclient_rollout:bucket_context(Seed, Key, Salty,
ldclient_context:new(<<"userKeyC">>), ldclient_attribute_reference:new(<<"key">>), <<"user">>),
{Point1, Point2, Point3}.
bucket_context_by_key(_) ->
Seed = null,
Salty = <<"saltyA">>,
Key = <<"hashKey">>,
{Point1, Point2, Point3} = make_and_bucket(Seed, Key, Salty),
fl_eq(0.42157587, Point1),
fl_eq(0.6708485, Point2),
fl_eq(0.10343106, Point3).
bucket_context_with_seed(_) ->
Seed = 61,
Salty = <<"saltyA">>,
Key = <<"hashKey">>,
{Point1, Point2, Point3} = make_and_bucket(Seed, Key, Salty),
fl_eq(0.09801207, Point1),
fl_eq(0.14483777, Point2),
fl_eq(0.9242641, Point3).
|
660cf5798021449be1fdbabcbc3509d71ccdd5f511b2f363ea5e555c38d25901 | yesodweb/persistent | MkSqlBackend.hs | {-# LANGUAGE RankNTypes #-}
module Database.Persist.SqlBackend.Internal.MkSqlBackend where
import Control.Monad.Logger (Loc, LogLevel, LogSource, LogStr)
import Data.Text (Text)
import Database.Persist.Names
import Database.Persist.SqlBackend.Internal.InsertSqlResult
import Database.Persist.SqlBackend.Internal.IsolationLevel
import Database.Persist.SqlBackend.Internal.Statement
import Database.Persist.Types.Base
import Data.Map (Map)
import Data.IORef (IORef)
-- | This type shares many of the same field names as the 'SqlBackend' type.
-- It's useful for library authors to use this when migrating from using the
' SqlBackend ' constructor directly to the ' mkSqlBackend ' function .
--
This type will only contain required fields for constructing a ' SqlBackend ' .
-- For fields that aren't present on this record, you'll want to use the various
-- @set@ functions or
--
@since 2.13.0.0
data MkSqlBackendArgs = MkSqlBackendArgs
{ connPrepare :: Text -> IO Statement
-- ^ This function should prepare a 'Statement' in the target database,
-- which should allow for efficient query reuse.
, connInsertSql :: EntityDef -> [PersistValue] -> InsertSqlResult
-- ^ This function generates the SQL and values necessary for
-- performing an insert against the database.
, connStmtMap :: IORef (Map Text Statement)
-- ^ A reference to the cache of statements. 'Statement's are keyed by
-- the 'Text' queries that generated them.
, connClose :: IO ()
-- ^ Close the underlying connection.
, connMigrateSql
:: [EntityDef]
-> (Text -> IO Statement)
-> EntityDef
-> IO (Either [Text] [(Bool, Text)])
-- ^ This function returns the migrations required to include the
' EntityDef ' parameter in the @['EntityDef']@ database . This might
-- include creating a new table if the entity is not present, or
-- altering an existing table if it is.
, connBegin :: (Text -> IO Statement) -> Maybe IsolationLevel -> IO ()
-- ^ A function to begin a transaction for the underlying database.
, connCommit :: (Text -> IO Statement) -> IO ()
-- ^ A function to commit a transaction to the underlying database.
, connRollback :: (Text -> IO Statement) -> IO ()
-- ^ A function to roll back a transaction on the underlying database.
, connEscapeFieldName :: FieldNameDB -> Text
-- ^ A function to extract and escape the name of the column corresponding
-- to the provided field.
--
@since 2.12.0.0
, connEscapeTableName :: EntityDef -> Text
-- ^ A function to extract and escape the name of the table corresponding
-- to the provided entity. PostgreSQL uses this to support schemas.
--
@since 2.12.0.0
, connEscapeRawName :: Text -> Text
-- ^ A function to escape raw DB identifiers. MySQL uses backticks, while
-- PostgreSQL uses quotes, and so on.
--
@since 2.12.0.0
, connNoLimit :: Text
, connRDBMS :: Text
^ A tag displaying what database the ' SqlBackend ' is for . Can be
-- used to differentiate features in downstream libraries for different
-- database backends.
, connLimitOffset :: (Int,Int) -> Text -> Text
-- ^ Attach a 'LIMIT/OFFSET' clause to a SQL query. Note that
LIMIT / OFFSET is problematic for performance , and indexed range
-- queries are the superior way to offer pagination.
, connLogFunc :: LogFunc
^ A log function for the ' SqlBackend ' to use .
}
type LogFunc = Loc -> LogSource -> LogLevel -> LogStr -> IO ()
| null | https://raw.githubusercontent.com/yesodweb/persistent/d7a67f0fea5e07f6f6562a54c0838de23c51d387/persistent/Database/Persist/SqlBackend/Internal/MkSqlBackend.hs | haskell | # LANGUAGE RankNTypes #
| This type shares many of the same field names as the 'SqlBackend' type.
It's useful for library authors to use this when migrating from using the
For fields that aren't present on this record, you'll want to use the various
@set@ functions or
^ This function should prepare a 'Statement' in the target database,
which should allow for efficient query reuse.
^ This function generates the SQL and values necessary for
performing an insert against the database.
^ A reference to the cache of statements. 'Statement's are keyed by
the 'Text' queries that generated them.
^ Close the underlying connection.
^ This function returns the migrations required to include the
include creating a new table if the entity is not present, or
altering an existing table if it is.
^ A function to begin a transaction for the underlying database.
^ A function to commit a transaction to the underlying database.
^ A function to roll back a transaction on the underlying database.
^ A function to extract and escape the name of the column corresponding
to the provided field.
^ A function to extract and escape the name of the table corresponding
to the provided entity. PostgreSQL uses this to support schemas.
^ A function to escape raw DB identifiers. MySQL uses backticks, while
PostgreSQL uses quotes, and so on.
used to differentiate features in downstream libraries for different
database backends.
^ Attach a 'LIMIT/OFFSET' clause to a SQL query. Note that
queries are the superior way to offer pagination. |
module Database.Persist.SqlBackend.Internal.MkSqlBackend where
import Control.Monad.Logger (Loc, LogLevel, LogSource, LogStr)
import Data.Text (Text)
import Database.Persist.Names
import Database.Persist.SqlBackend.Internal.InsertSqlResult
import Database.Persist.SqlBackend.Internal.IsolationLevel
import Database.Persist.SqlBackend.Internal.Statement
import Database.Persist.Types.Base
import Data.Map (Map)
import Data.IORef (IORef)
' SqlBackend ' constructor directly to the ' mkSqlBackend ' function .
This type will only contain required fields for constructing a ' SqlBackend ' .
@since 2.13.0.0
data MkSqlBackendArgs = MkSqlBackendArgs
{ connPrepare :: Text -> IO Statement
, connInsertSql :: EntityDef -> [PersistValue] -> InsertSqlResult
, connStmtMap :: IORef (Map Text Statement)
, connClose :: IO ()
, connMigrateSql
:: [EntityDef]
-> (Text -> IO Statement)
-> EntityDef
-> IO (Either [Text] [(Bool, Text)])
' EntityDef ' parameter in the @['EntityDef']@ database . This might
, connBegin :: (Text -> IO Statement) -> Maybe IsolationLevel -> IO ()
, connCommit :: (Text -> IO Statement) -> IO ()
, connRollback :: (Text -> IO Statement) -> IO ()
, connEscapeFieldName :: FieldNameDB -> Text
@since 2.12.0.0
, connEscapeTableName :: EntityDef -> Text
@since 2.12.0.0
, connEscapeRawName :: Text -> Text
@since 2.12.0.0
, connNoLimit :: Text
, connRDBMS :: Text
^ A tag displaying what database the ' SqlBackend ' is for . Can be
, connLimitOffset :: (Int,Int) -> Text -> Text
LIMIT / OFFSET is problematic for performance , and indexed range
, connLogFunc :: LogFunc
^ A log function for the ' SqlBackend ' to use .
}
type LogFunc = Loc -> LogSource -> LogLevel -> LogStr -> IO ()
|
2017136e587071a7f25bafcf25c10ac20902314e40e53a76910d0c972199feae | aib/Project-Euler | euler93.hs | import Data.Function
import Data.List
targets :: [a -> a -> a] -> [a] -> [a]
targets ops nums = do
opOrder < - concatMap permutations $ multicombinations ( length nums - 1 ) ops
opOrder <- sequence . replicate (length nums - 1) $ ops
numOrder <- permutations nums
return $ target opOrder numOrder
where
target ops nums = foldr ($) (last nums) $ zipWith ($) ops (init nums)
ops = [(+),(*),(-),flip (-),safeDiv,flip safeDiv]
where
safeDiv x y = if y == 0
then 0
else x / y
main = do
print $ maximumBy (compare `on` snd) $ map targetLength allForms
where
allForms = do d <- [3..9]
c <- [2..d - 1]
b <- [1..c - 1]
a <- [0..b - 1]
return ((a,b,c,d), sort . nub . filter (>0) . filter isWhole $ targets ops [a,b,c,d])
isWhole x = (fromInteger . truncate) x == x
targetLength (nums, targets) = (nums, length . takeWhile id . zipWith (==) [1..] $ targets) | null | https://raw.githubusercontent.com/aib/Project-Euler/a01a142d360f3a30fcea69ab29d4d198160ee921/euler93.hs | haskell | import Data.Function
import Data.List
targets :: [a -> a -> a] -> [a] -> [a]
targets ops nums = do
opOrder < - concatMap permutations $ multicombinations ( length nums - 1 ) ops
opOrder <- sequence . replicate (length nums - 1) $ ops
numOrder <- permutations nums
return $ target opOrder numOrder
where
target ops nums = foldr ($) (last nums) $ zipWith ($) ops (init nums)
ops = [(+),(*),(-),flip (-),safeDiv,flip safeDiv]
where
safeDiv x y = if y == 0
then 0
else x / y
main = do
print $ maximumBy (compare `on` snd) $ map targetLength allForms
where
allForms = do d <- [3..9]
c <- [2..d - 1]
b <- [1..c - 1]
a <- [0..b - 1]
return ((a,b,c,d), sort . nub . filter (>0) . filter isWhole $ targets ops [a,b,c,d])
isWhole x = (fromInteger . truncate) x == x
targetLength (nums, targets) = (nums, length . takeWhile id . zipWith (==) [1..] $ targets) |
|
a55f27ee99794b27ef5d4b3480709cdb870e3ea55ad9e96775a63aadc9d4a5ca | MastodonC/witan.ui | shared.clj | (ns witan.ui.style.shared
(:require [garden.units :refer [px em percent]]
[witan.ui.style.colour :as colour]
[witan.ui.style.fonts :as fonts]
[witan.ui.style.values :as values]
[witan.ui.style.util :refer [transition]]))
(def style [[:.shared-search-input
{:position :relative}
[:i
{:position :absolute
:vertical-align :middle
:margin (em 0.24)}]
[:input
{:padding-left (px 30)
:height (px 34)
:width (percent 100)}]]
;;;;;;;;;;;;;;
[:.shared-checkbox
[:label
{:margin-left (px 10)
:vertical-align "middle"}
[:input
{:vertical-align "middle"
:position "relative"
:bottom (px 1)}]]]
[:.shared-table
^:prefix {:user-select :none}
[:.pure-table.pure-table-horizontal
{:width (percent 100)}]
[:#loading
{:margin-top (em 2)}]
[:th
{:color colour/table-header-text
:font-weight :normal
:cursor :default}]
[:tbody
[:tr
{:transition (transition :background-color "0.15s"
:color "0.15s")
:height (px 40)}
[:&:hover
{:background-color colour/table-row-hover-bg
:color colour/table-row-hover-text
:cursor :pointer}]
[:&.selected
{:background-color colour/table-row-selected-bg
:color colour/table-row-selected-text
:cursor :pointer}]]]]
;;;;;;;;;;;;;;
[:.sharing-matrix
[:.pure-table.pure-table-horizontal
{:width (percent 100)}]
[:#loading
{:margin-top (em 2)}]
[:th
{:color colour/table-header-text
:font-weight :normal
:cursor :default}]
[:thead
[:th
[:&:first-child {:width "50%"}]]]
[:tbody
[:tr
{:height (em 3)}
[:td
[:&:first-child {:width "50%"}]]]]]
;;;;;;;;;;;;;;
[:.shared-heading
{:background-color colour/dash-heading-bg
:box-shadow "0px 2px 4px #888"
:position :relative
:height values/app-peripheral-height
:z-index 50
:display :flex
:align-items :center
:justify-content :flex-start
:min-width (px 325)}
[:h1 :h2
{:position :relative
:float :left
:padding (px 0)
:margin-left (px 10)}]
[:h1
{:line-height (em 1)
:font-weight 700}]
[:h2
{:font-family fonts/base-fonts
:font-weight 500
:font-size (em 1.2)}]
[:span
{:margin [[(px 15) (px 10) (px 10) (px 10)]]}]
[:.shared-search-input
{:display :inline-flex
:font-size (px 14)
:vertical-align :super
:margin-left (em 1)}
[:form
{:width (em 32)}]]
[:&.center-string
[:h1
{:width (percent 100)
:text-align :center}]]]
;;;;;;;;;;;;;;
[:.shared-inline-group :.shared-inline-schema
{:display :inline}
[:.group-icon :.schema-icon
{:display :inline
:vertical-align :sub
:margin-right (em 0.2)}]
[:.you
{:margin-left (px 4)
:cursor :default}]]
[:.shared-inline-file-title
{:display :flex
:align-items :center
:text-overflow :ellipsis
:width (percent 100)}
[:h1 :h2 :h3 :h4 :h5
{:margin [[(em 0.0) (em 0.3)]]
:line-height (em 1.6)
:white-space :nowrap
:overflow :hidden
:text-overflow :ellipsis}]]
;;;;;;;;;;;;;;
[:.button-container
{:align-self :center}
[:.material-icons
{:vertical-align :middle}]
[:button
{:margin-left (em 0.5)
:box-shadow [[(px 2) (px 2) (px 4) colour/box-shadow]]}]]
;;;;;;;;;;;;;;
[:.error
{:color colour/error}]
[:.success
{:color colour/success}]
[:.btn-success
{:background-color colour/success
:color 'white}]
[:.btn-danger
{:background-color colour/danger
:color 'white}]
[:.btn-error
{:background-color colour/error
:color 'white}]
;;;;;;;;;;;;;;
[:.space-after
{:margin-bottom (em 1)}]
;;;;;;;;;;;;;;
[:.rotates
{:transition (transition :transform "0.3s")}]
[:.rotate0
{:transform "rotate(0deg)"}]
[:.rotate270
{:transform "rotate(-90deg)"}]
;;;;;;;;;;;;;;
[:.shared-index
[:.alpha-header
[:.alpha-header-clickable
{:color 'blue
:cursor :pointer}]
[:a :span
{:margin-right (px 4)
:font-size (px 18)}]]
[:.alpha-index
[:h1
{:font-size (px 24)
:font-weight :bold
:font-family fonts/base-fonts}]]]
;;;;;;;;;;;;;;
[:div.shared-info-panel
{:display :flex
:background-color colour/info-bg
:border [[(px 1) colour/info-border 'solid]]
:padding (em 0.5)
:margin [[(em 0.4) (em 0)]]}
[:div
{:font-size (px 11)
:font-style :italic
:color colour/info-text
:display :flex
:justify-content :center
:align-content :center
:flex-direction :column
:vertical-align :middle}]
[:i
{:padding-right (em 0.5)}]
[:.message
{:overflow :hidden
:line-height (em 1.4)}]]
;;;;;;;;;;;;;;
[:.number-circle
{:border-radius (percent 50)
:width (px 20)
:height (px 18)
:line-height (px 18)
:padding (px 3)
:background colour/button-create
:color colour/body-bg
:text-align :center
:font-size (px 12)
:font-weight :bold
}]
;;;;;;;;;;;;;;;
[:.shared-schema-search-area :.shared-group-search-area :.shared-search-area
[:div.breakout-area
{:display :flex
:overflow :hidden
:transition (transition :height "0.3s")
:width (percent 100)
:margin (px 10)}
[:.shared-table
{:width (percent 100)
:border [[(px 1) colour/gutter 'solid]]
:overflow-y :scroll
:overflow-x :hidden}
[:.pure-table.pure-table-horizontal
{:border 0}]
[:.shared-table-rows
[:tbody>tr:last-child>td
{:border-bottom [[(px 1) "#cbcbcb" 'solid]]}]]
[:.pure-button
]]
[:.close
{:color 'silver
:cursor :pointer}
[:&:hover
{:color colour/side-bg}]]]]
;;;;;;;;;;;;;;;
[:.shared-progress-bar
{:border [[(px 1) 'solid colour/progress-bar-border]]
:border-radius (em 0.3)
:margin [[(em 1) 0]]
:height (px 14)
:overflow :hidden
}]
[:.shared-progress-bar-inner
{:background-color colour/progress-bar-fill
:border [[(px 10) 'solid colour/progress-bar-fill]]
:margin-left (px -5)
:margin-top (px -5)
:height (percent 100)}]
[:.shared-tabs
{:display :flex
:justify-content :center
:background-color "#eee"
:box-shadow [[(px 0) (px 1) (px 4) "rgba(0,0,0,.14)"]]}
[:.shared-tab
{:margin [[(em 0.0) (em 0.75)]]
:margin-top (em 0.8)
:color colour/subtle-grey4
:cursor :pointer
:font-size (px 16)}
[:&:hover
{:color colour/clickable}]]
[:.shared-tab-selected
{:color colour/title-fonts-colour
:border-bottom [[(px 2) 'solid colour/switcher-button-selected]]}]]
[:.shared-tag
{:display :inline
:margin (em 0.3)
:padding [[(em 0.4) (em 0.3)]]
:font-size (em 0.9)
:background-color colour/tag-bg
:border [[(px 1) 'solid colour/tag-border]]}
[:.tag-close
{:display :inline}]
[:i
{:font-size (px 10)
:font-weight 700
:padding (px 1)
:margin-right (px 3)}
[:&:hover
{:color 'white}]]]
[:.shared-tag-clickable
{:cursor :pointer}
[:span
[:&:hover
{:color colour/body-bg}]]]
[:.clickable-text
{:color colour/clickable
:cursor :pointer}
[:&:hover
{:color colour/clickable-hovered}]]
[:.shared-collapsible-text
[:div
{:margin-top (px 3)
:margin-left (px 2)}
[:&.rotate270
{:margin-top (px -3)
:margin-left (px 3)}]]
[:span
{:margin-top (px 4)
:margin-left (px 1)}
[:&.ellipsis
{:margin-top (px 2)
:margin-left (px -2)}]]
[:i
{:cursor :pointer}]]
[:.editable-field
{:padding (em 1)
:margin-bottom (em 1)
:line-height (em 1.7)
:border-color colour/subtle-grey
:border-radius (px 2)
:box-shadow [[(px 0) (px 1) (px 4) "rgba(0,0,0,.14)"]]
:position :relative}
[:&:hover
{}]
[:span.clickable-text.edit-label
{:font-size (px 12)
:height (em 0.75)
:line-height (em 0.75)
:position :absolute
:right (px 8)
:bottom (px 8)}]
[:.heading
{:margin-top (em 0)}]
[:.intro
{:line-height (em 1.5)
:display :block
:font-size (px 11)
:color 'dimgrey
:margin-bottom (em 1)}]
[:.editable-field-content
{:display :flex
:justify-content :space-between
:vertical-align :bottom
:align-items :flex-end}]]
[:.editable-field-editing
{}]
[:.btn-pagination
{:padding (px 2)}
[:span {:margin-right (px 5)}]]
;;;;;;;;;;;;;;;
[:.shared-dropdown.button-container
[:button
{:margin-left (px 0)
:width (percent 100)}]]
[".shared-dropdown[disabled]"
{:border [[(px 1) 'silver 'solid]]}]
[:.shared-dropdown
{:position :relative
:width (percent 100)
:margin-bottom (em 1.4)}
[:.shared-inline-group
{:padding-top (px 2)}
[:.group-icon
{:vertical-align :initial}]]
[:.shared-dropdown-content
{:position :absolute
:margin-top (px 1)
:width (percent 100)
:background-color "#E6E6E6"
:box-shadow [[(px 2) (px 2) (px 4) "#888"]]
:border-radius (px 2)
:font-family "'Lato',Helvetica Neue,Helvetica,Arial,sans-serif"
:font-size (px 13)
:line-height :normal
:cursor :pointer
:z-index 1000}
[:.shared-inline-group
{:padding [[(em 0.5) (em 0.75)]]
:display :block}]
[:.shared-inline-group:hover
{:background-color colour/subtle-grey2}]]]])
| null | https://raw.githubusercontent.com/MastodonC/witan.ui/5ea6baaff52824ac61737911dd2c11b451157bad/src/styles/witan/ui/style/shared.clj | clojure | (ns witan.ui.style.shared
(:require [garden.units :refer [px em percent]]
[witan.ui.style.colour :as colour]
[witan.ui.style.fonts :as fonts]
[witan.ui.style.values :as values]
[witan.ui.style.util :refer [transition]]))
(def style [[:.shared-search-input
{:position :relative}
[:i
{:position :absolute
:vertical-align :middle
:margin (em 0.24)}]
[:input
{:padding-left (px 30)
:height (px 34)
:width (percent 100)}]]
[:.shared-checkbox
[:label
{:margin-left (px 10)
:vertical-align "middle"}
[:input
{:vertical-align "middle"
:position "relative"
:bottom (px 1)}]]]
[:.shared-table
^:prefix {:user-select :none}
[:.pure-table.pure-table-horizontal
{:width (percent 100)}]
[:#loading
{:margin-top (em 2)}]
[:th
{:color colour/table-header-text
:font-weight :normal
:cursor :default}]
[:tbody
[:tr
{:transition (transition :background-color "0.15s"
:color "0.15s")
:height (px 40)}
[:&:hover
{:background-color colour/table-row-hover-bg
:color colour/table-row-hover-text
:cursor :pointer}]
[:&.selected
{:background-color colour/table-row-selected-bg
:color colour/table-row-selected-text
:cursor :pointer}]]]]
[:.sharing-matrix
[:.pure-table.pure-table-horizontal
{:width (percent 100)}]
[:#loading
{:margin-top (em 2)}]
[:th
{:color colour/table-header-text
:font-weight :normal
:cursor :default}]
[:thead
[:th
[:&:first-child {:width "50%"}]]]
[:tbody
[:tr
{:height (em 3)}
[:td
[:&:first-child {:width "50%"}]]]]]
[:.shared-heading
{:background-color colour/dash-heading-bg
:box-shadow "0px 2px 4px #888"
:position :relative
:height values/app-peripheral-height
:z-index 50
:display :flex
:align-items :center
:justify-content :flex-start
:min-width (px 325)}
[:h1 :h2
{:position :relative
:float :left
:padding (px 0)
:margin-left (px 10)}]
[:h1
{:line-height (em 1)
:font-weight 700}]
[:h2
{:font-family fonts/base-fonts
:font-weight 500
:font-size (em 1.2)}]
[:span
{:margin [[(px 15) (px 10) (px 10) (px 10)]]}]
[:.shared-search-input
{:display :inline-flex
:font-size (px 14)
:vertical-align :super
:margin-left (em 1)}
[:form
{:width (em 32)}]]
[:&.center-string
[:h1
{:width (percent 100)
:text-align :center}]]]
[:.shared-inline-group :.shared-inline-schema
{:display :inline}
[:.group-icon :.schema-icon
{:display :inline
:vertical-align :sub
:margin-right (em 0.2)}]
[:.you
{:margin-left (px 4)
:cursor :default}]]
[:.shared-inline-file-title
{:display :flex
:align-items :center
:text-overflow :ellipsis
:width (percent 100)}
[:h1 :h2 :h3 :h4 :h5
{:margin [[(em 0.0) (em 0.3)]]
:line-height (em 1.6)
:white-space :nowrap
:overflow :hidden
:text-overflow :ellipsis}]]
[:.button-container
{:align-self :center}
[:.material-icons
{:vertical-align :middle}]
[:button
{:margin-left (em 0.5)
:box-shadow [[(px 2) (px 2) (px 4) colour/box-shadow]]}]]
[:.error
{:color colour/error}]
[:.success
{:color colour/success}]
[:.btn-success
{:background-color colour/success
:color 'white}]
[:.btn-danger
{:background-color colour/danger
:color 'white}]
[:.btn-error
{:background-color colour/error
:color 'white}]
[:.space-after
{:margin-bottom (em 1)}]
[:.rotates
{:transition (transition :transform "0.3s")}]
[:.rotate0
{:transform "rotate(0deg)"}]
[:.rotate270
{:transform "rotate(-90deg)"}]
[:.shared-index
[:.alpha-header
[:.alpha-header-clickable
{:color 'blue
:cursor :pointer}]
[:a :span
{:margin-right (px 4)
:font-size (px 18)}]]
[:.alpha-index
[:h1
{:font-size (px 24)
:font-weight :bold
:font-family fonts/base-fonts}]]]
[:div.shared-info-panel
{:display :flex
:background-color colour/info-bg
:border [[(px 1) colour/info-border 'solid]]
:padding (em 0.5)
:margin [[(em 0.4) (em 0)]]}
[:div
{:font-size (px 11)
:font-style :italic
:color colour/info-text
:display :flex
:justify-content :center
:align-content :center
:flex-direction :column
:vertical-align :middle}]
[:i
{:padding-right (em 0.5)}]
[:.message
{:overflow :hidden
:line-height (em 1.4)}]]
[:.number-circle
{:border-radius (percent 50)
:width (px 20)
:height (px 18)
:line-height (px 18)
:padding (px 3)
:background colour/button-create
:color colour/body-bg
:text-align :center
:font-size (px 12)
:font-weight :bold
}]
[:.shared-schema-search-area :.shared-group-search-area :.shared-search-area
[:div.breakout-area
{:display :flex
:overflow :hidden
:transition (transition :height "0.3s")
:width (percent 100)
:margin (px 10)}
[:.shared-table
{:width (percent 100)
:border [[(px 1) colour/gutter 'solid]]
:overflow-y :scroll
:overflow-x :hidden}
[:.pure-table.pure-table-horizontal
{:border 0}]
[:.shared-table-rows
[:tbody>tr:last-child>td
{:border-bottom [[(px 1) "#cbcbcb" 'solid]]}]]
[:.pure-button
]]
[:.close
{:color 'silver
:cursor :pointer}
[:&:hover
{:color colour/side-bg}]]]]
[:.shared-progress-bar
{:border [[(px 1) 'solid colour/progress-bar-border]]
:border-radius (em 0.3)
:margin [[(em 1) 0]]
:height (px 14)
:overflow :hidden
}]
[:.shared-progress-bar-inner
{:background-color colour/progress-bar-fill
:border [[(px 10) 'solid colour/progress-bar-fill]]
:margin-left (px -5)
:margin-top (px -5)
:height (percent 100)}]
[:.shared-tabs
{:display :flex
:justify-content :center
:background-color "#eee"
:box-shadow [[(px 0) (px 1) (px 4) "rgba(0,0,0,.14)"]]}
[:.shared-tab
{:margin [[(em 0.0) (em 0.75)]]
:margin-top (em 0.8)
:color colour/subtle-grey4
:cursor :pointer
:font-size (px 16)}
[:&:hover
{:color colour/clickable}]]
[:.shared-tab-selected
{:color colour/title-fonts-colour
:border-bottom [[(px 2) 'solid colour/switcher-button-selected]]}]]
[:.shared-tag
{:display :inline
:margin (em 0.3)
:padding [[(em 0.4) (em 0.3)]]
:font-size (em 0.9)
:background-color colour/tag-bg
:border [[(px 1) 'solid colour/tag-border]]}
[:.tag-close
{:display :inline}]
[:i
{:font-size (px 10)
:font-weight 700
:padding (px 1)
:margin-right (px 3)}
[:&:hover
{:color 'white}]]]
[:.shared-tag-clickable
{:cursor :pointer}
[:span
[:&:hover
{:color colour/body-bg}]]]
[:.clickable-text
{:color colour/clickable
:cursor :pointer}
[:&:hover
{:color colour/clickable-hovered}]]
[:.shared-collapsible-text
[:div
{:margin-top (px 3)
:margin-left (px 2)}
[:&.rotate270
{:margin-top (px -3)
:margin-left (px 3)}]]
[:span
{:margin-top (px 4)
:margin-left (px 1)}
[:&.ellipsis
{:margin-top (px 2)
:margin-left (px -2)}]]
[:i
{:cursor :pointer}]]
[:.editable-field
{:padding (em 1)
:margin-bottom (em 1)
:line-height (em 1.7)
:border-color colour/subtle-grey
:border-radius (px 2)
:box-shadow [[(px 0) (px 1) (px 4) "rgba(0,0,0,.14)"]]
:position :relative}
[:&:hover
{}]
[:span.clickable-text.edit-label
{:font-size (px 12)
:height (em 0.75)
:line-height (em 0.75)
:position :absolute
:right (px 8)
:bottom (px 8)}]
[:.heading
{:margin-top (em 0)}]
[:.intro
{:line-height (em 1.5)
:display :block
:font-size (px 11)
:color 'dimgrey
:margin-bottom (em 1)}]
[:.editable-field-content
{:display :flex
:justify-content :space-between
:vertical-align :bottom
:align-items :flex-end}]]
[:.editable-field-editing
{}]
[:.btn-pagination
{:padding (px 2)}
[:span {:margin-right (px 5)}]]
[:.shared-dropdown.button-container
[:button
{:margin-left (px 0)
:width (percent 100)}]]
[".shared-dropdown[disabled]"
{:border [[(px 1) 'silver 'solid]]}]
[:.shared-dropdown
{:position :relative
:width (percent 100)
:margin-bottom (em 1.4)}
[:.shared-inline-group
{:padding-top (px 2)}
[:.group-icon
{:vertical-align :initial}]]
[:.shared-dropdown-content
{:position :absolute
:margin-top (px 1)
:width (percent 100)
:background-color "#E6E6E6"
:box-shadow [[(px 2) (px 2) (px 4) "#888"]]
:border-radius (px 2)
:font-family "'Lato',Helvetica Neue,Helvetica,Arial,sans-serif"
:font-size (px 13)
:line-height :normal
:cursor :pointer
:z-index 1000}
[:.shared-inline-group
{:padding [[(em 0.5) (em 0.75)]]
:display :block}]
[:.shared-inline-group:hover
{:background-color colour/subtle-grey2}]]]])
|
|
606b0d154fba9e54e757d26506457c4a155b32db0b0896f4f719c03a2b976b74 | racket/typed-racket | type-alias-env.rkt | #lang racket/base
(require "env-utils.rkt"
"../utils/utils.rkt"
syntax/private/id-table
(only-in "../rep/type-rep.rkt" Type? Name? Name-id)
"../utils/tc-utils.rkt"
"../typecheck/renamer.rkt"
(prefix-in c: (contract-req))
racket/match)
(provide register-type-alias
lookup-type-alias
resolve-type-aliases
register-resolved-type-alias
type-alias-env-map
type-alias-env-for-each
incomplete-name-alias-map start-type-alias-registration! complete-type-alias-registration! complete-name?)
;; a parameter, whose value is either:
;; false, which indicates the type checker is not registering type alias
;; a mapping from id to boolean otherwise.
(define incomplete-name-alias-map (make-parameter #f))
;; signal that the type alias will start registration
(define/cond-contract (start-type-alias-registration! id name)
(c:-> identifier? Name? void?)
(register-resolved-type-alias id name)
(free-id-table-set! (incomplete-name-alias-map) id #t))
;; signal that the type alias will finish registration
(define/cond-contract (complete-type-alias-registration! id)
(c:-> identifier? void?)
(free-id-table-remove! (incomplete-name-alias-map) id))
;; if the input is a Name?, return whether the type alias it represents has
;; finished registration. Otherwise return false.
(define/cond-contract (complete-name? name)
(c:-> Type? boolean?)
(and (Name? name)
(or (not (incomplete-name-alias-map))
(not (free-id-table-ref (incomplete-name-alias-map) (Name-id name) #f)))))
(define-struct alias-def () #:inspector #f)
;; persistent?: indicates if the resolved result will be used to swap out the
previous unresolved one
(define-struct (unresolved alias-def) (stx [in-process #:mutable] persistent?) #:inspector #f)
(define-struct (resolved alias-def) (ty) #:inspector #f)
;; a mapping from id -> alias-def (where id is the name of the type)
(define the-mapping
(make-free-id-table))
(define (mapping-put! id v)
(free-id-table-set! the-mapping id v))
;(trace mapping-put!)
;; add a name to the mapping
;; identifier type-stx -> void
(define (register-type-alias id stx [persistent? #t])
(mapping-put! id (make-unresolved stx #f persistent?)))
(define (register-resolved-type-alias id ty)
(mapping-put! id (make-resolved ty)))
(define (lookup-type-alias id parse-type [k (lambda () (tc-error "Unknown type alias: ~a" (syntax-e id)))])
(match (or (free-id-table-ref the-mapping id #f)
(free-id-table-ref the-mapping (un-rename id) #f))
[#f (k)]
[(struct unresolved (stx #f _))
(resolve-type-alias id parse-type)]
[(struct unresolved (stx #t _))
(tc-error/stx stx "Recursive Type Alias Reference")]
[(struct resolved (t)) t]))
(define (resolve-type-alias id parse-type)
(define v (free-id-table-ref the-mapping id))
(match v
[(struct unresolved (stx _ persistent?))
(set-unresolved-in-process! v #t)
(let ([t (parse-type stx)])
(when persistent?
(mapping-put! id (make-resolved t)))
t)]
[(struct resolved (t))
t]))
(define (resolve-type-aliases parse-type)
(for ([id (in-list (free-id-table-keys the-mapping))])
(resolve-type-alias id parse-type)))
;; map over the-mapping, producing a list
;; (id type -> T) -> listof[T]
(define (type-alias-env-map f)
(for/list ([(id t) (in-sorted-free-id-table the-mapping)]
#:when (resolved? t))
(f id (resolved-ty t))))
(define (type-alias-env-for-each f)
(for ([(id t) (in-sorted-free-id-table the-mapping)]
#:when (resolved? t))
(f id (resolved-ty t))))
| null | https://raw.githubusercontent.com/racket/typed-racket/d65ff8bd7146b15b79f31506f8aca410aed196e4/typed-racket-lib/typed-racket/env/type-alias-env.rkt | racket | a parameter, whose value is either:
false, which indicates the type checker is not registering type alias
a mapping from id to boolean otherwise.
signal that the type alias will start registration
signal that the type alias will finish registration
if the input is a Name?, return whether the type alias it represents has
finished registration. Otherwise return false.
persistent?: indicates if the resolved result will be used to swap out the
a mapping from id -> alias-def (where id is the name of the type)
(trace mapping-put!)
add a name to the mapping
identifier type-stx -> void
map over the-mapping, producing a list
(id type -> T) -> listof[T] | #lang racket/base
(require "env-utils.rkt"
"../utils/utils.rkt"
syntax/private/id-table
(only-in "../rep/type-rep.rkt" Type? Name? Name-id)
"../utils/tc-utils.rkt"
"../typecheck/renamer.rkt"
(prefix-in c: (contract-req))
racket/match)
(provide register-type-alias
lookup-type-alias
resolve-type-aliases
register-resolved-type-alias
type-alias-env-map
type-alias-env-for-each
incomplete-name-alias-map start-type-alias-registration! complete-type-alias-registration! complete-name?)
(define incomplete-name-alias-map (make-parameter #f))
(define/cond-contract (start-type-alias-registration! id name)
(c:-> identifier? Name? void?)
(register-resolved-type-alias id name)
(free-id-table-set! (incomplete-name-alias-map) id #t))
(define/cond-contract (complete-type-alias-registration! id)
(c:-> identifier? void?)
(free-id-table-remove! (incomplete-name-alias-map) id))
(define/cond-contract (complete-name? name)
(c:-> Type? boolean?)
(and (Name? name)
(or (not (incomplete-name-alias-map))
(not (free-id-table-ref (incomplete-name-alias-map) (Name-id name) #f)))))
(define-struct alias-def () #:inspector #f)
previous unresolved one
(define-struct (unresolved alias-def) (stx [in-process #:mutable] persistent?) #:inspector #f)
(define-struct (resolved alias-def) (ty) #:inspector #f)
(define the-mapping
(make-free-id-table))
(define (mapping-put! id v)
(free-id-table-set! the-mapping id v))
(define (register-type-alias id stx [persistent? #t])
(mapping-put! id (make-unresolved stx #f persistent?)))
(define (register-resolved-type-alias id ty)
(mapping-put! id (make-resolved ty)))
(define (lookup-type-alias id parse-type [k (lambda () (tc-error "Unknown type alias: ~a" (syntax-e id)))])
(match (or (free-id-table-ref the-mapping id #f)
(free-id-table-ref the-mapping (un-rename id) #f))
[#f (k)]
[(struct unresolved (stx #f _))
(resolve-type-alias id parse-type)]
[(struct unresolved (stx #t _))
(tc-error/stx stx "Recursive Type Alias Reference")]
[(struct resolved (t)) t]))
(define (resolve-type-alias id parse-type)
(define v (free-id-table-ref the-mapping id))
(match v
[(struct unresolved (stx _ persistent?))
(set-unresolved-in-process! v #t)
(let ([t (parse-type stx)])
(when persistent?
(mapping-put! id (make-resolved t)))
t)]
[(struct resolved (t))
t]))
(define (resolve-type-aliases parse-type)
(for ([id (in-list (free-id-table-keys the-mapping))])
(resolve-type-alias id parse-type)))
(define (type-alias-env-map f)
(for/list ([(id t) (in-sorted-free-id-table the-mapping)]
#:when (resolved? t))
(f id (resolved-ty t))))
(define (type-alias-env-for-each f)
(for ([(id t) (in-sorted-free-id-table the-mapping)]
#:when (resolved? t))
(f id (resolved-ty t))))
|
fff942a0e22fe082d79b5e5b65799696edac46e049564c726267b0d426ab020e | ibawt/chezuv | 125.body.scm | Copyright 2015 .
;;;
;;; Permission to copy this software, in whole or in part, to use this
;;; software for any lawful purpose, and to redistribute this software
;;; is granted subject to the restriction that all copies made of this
;;; software must include this copyright and permission notice in full.
;;;
;;; I also request that you send me a copy of any improvements that you
;;; make to this software so that they may be incorporated within it to
;;; the benefit of the Scheme community.
;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; Private stuff, not exported.
Ten of the SRFI 125 procedures are deprecated , and another
two allow alternative arguments that are deprecated .
(define (issue-deprecated-warnings?) #t)
(define (issue-warning-deprecated name-of-deprecated-misfeature)
(if (not (memq name-of-deprecated-misfeature already-warned))
(begin
(set! already-warned
(cons name-of-deprecated-misfeature already-warned))
(if (issue-deprecated-warnings?)
(let ((out (current-error-port)))
(display "WARNING: " out)
(display name-of-deprecated-misfeature out)
(newline out)
(display " is deprecated by SRFI 125. See" out)
(newline out)
(display " " out)
(display url:deprecated out)
(newline out))))))
(define url:deprecated
"-125/srfi-125.html")
; List of deprecated features for which a warning has already
; been issued.
(define already-warned '())
Comparators contain a type test predicate , which implementations
;;; of the hash-table-set! procedure can use to reject invalid keys.
;;; That's hard to do without sacrificing interoperability with R6RS
and/or SRFI 69 and/or SRFI 126 hash tables .
;;;
;;; Full interoperability means the hash tables implemented here are
interchangeable with the SRFI 126 hashtables used to implement them .
SRFI 69 and R6RS and SRFI 126 hashtables do n't contain comparators ,
;;; so any association between a hash table and its comparator would have
;;; to be maintained outside the representation of hash tables themselves,
;;; which is problematic unless weak pointers are available.
;;;
;;; Not all of the hash tables implemented here will have comparators
;;; associated with them anyway, because an equivalence procedure
;;; and hash function can be used to create a hash table instead of
a comparator ( although that usage is deprecated by SRFI 125 ) .
;;;
One way to preserve interoperability while enforcing a comparator 's
;;; type test is to incorporate that test into a hash table's hash
;;; function. The advantage of doing that should be weighed against
;;; these disadvantages:
;;;
;;; If the type test is slow, then hashing would also be slower.
;;;
The R6RS , SRFI 69 , and SRFI 126 APIs allow extraction of
;;; a hash function from some hash tables.
;;; Some programmers might expect that hash function to be the
;;; hash function encapsulated by the comparator (in the sense
;;; of eq?, perhaps) even though this API makes no such guarantee
;;; (and extraction of that hash function from an existing hash
;;; table can only be done by calling a deprecated procedure).
;;; If %enforce-comparator-type-tests is true, then make-hash-table,
;;; when passed a comparator, will use a hash function that enforces
;;; the comparator's type test.
(define %enforce-comparator-type-tests #t)
;;; Given a comparator, return its hash function, possibly augmented
;;; by the comparator's type test.
(define (%comparator-hash-function comparator)
(let ((okay? (comparator-type-test-predicate comparator))
(hash-function (comparator-hash-function comparator)))
(if %enforce-comparator-type-tests
(lambda (x . rest)
(cond ((not (okay? x))
(error "key rejected by hash-table comparator"
x
comparator))
((null? rest)
(hash-function x))
(else
(apply hash-function x rest))))
hash-function)))
;;; A unique (in the sense of eq?) value that will never be found
;;; within a hash-table.
(define %not-found (list '%not-found))
;;; A unique (in the sense of eq?) value that escapes only as an irritant
;;; when a hash-table key is not found.
(define %not-found-irritant (list 'not-found))
;;; The error message used when a hash-table key is not found.
(define %not-found-message "hash-table key not found")
We let SRFI 126 decide which weakness is supported
(define (%check-optional-arguments procname args)
(if (memq 'thread-safe args)
(error (string-append (symbol->string procname)
": unsupported optional argument(s)")
args)))
(define (%get-hash-table-weakness args)
(cond
((memq 'ephemeral-values args)
(if (or (memq 'ephemeral-keys args)
(memq 'weak-keys args))
'ephemeral-key-and-value
'ephemeral-value))
((memq 'ephemeral-keys args)
(if (memq 'weak-values args)
'ephemeral-key-and-value
'ephemeral-key))
((memq 'weak-keys args)
(if (memq 'weak-values args)
'weak-key-and-value
'weak-key))
((memq 'weak-values args)
'weak-value)
(else #f)))
(define (%get-hash-table-capacity args)
(find fixnum? args))
This was exported by an earlier draft of SRFI 125 ,
;;; and is still used by hash-table=?
(define (hash-table-every proc ht)
(call-with-values
(lambda () (hashtable-entries ht))
(lambda (keys vals)
(let ((size (vector-length keys)))
(let loop ((i 0))
(or (fx>=? i size)
(let* ((key (vector-ref keys i))
(val (vector-ref vals i)))
(and (proc key val)
(loop (fx+ i 1))))))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; Exported procedures
;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; Constructors.
The first argument can be a comparator or an equality predicate .
;;;
If the first argument is a comparator , any remaining arguments
;;; are implementation-dependent, but a non-negative exact integer
;;; should be interpreted as an initial capacity and the symbols
;;; thread-safe, weak-keys, ephemeral-keys, weak-values, and
;;; emphemeral-values should be interpreted specially. (These
;;; special symbols are distinct from the analogous special symbols
in SRFI 126 . )
;;;
If the first argument is not a comparator , then it had better
be an equality predicate ( which is deprecated by SRFI 125 ) .
If a second argument is present and is a procedure , then it 's
;;; a hash function (which is allowed only for the deprecated case
in which the first argument is an equality predicate ) . If a
second argument is not a procedure , then it 's some kind of
;;; implementation-dependent optional argument, as are all arguments
beyond the second .
;;;
SRFI 128 defines make - eq - comparator , make - eqv - comparator , and
;;; make-equal-comparator procedures whose hash function is the
default - hash procedure of SRFI 128 , which is inappropriate
;;; for use with eq? and eqv? unless the object being hashed is
never mutated . Neither SRFI 125 nor 128 provide any way to
;;; define a comparator whose hash function is truly compatible
;;; with the use of eq? or eqv? as an equality predicate.
;;;
That would make SRFI 125 almost as bad as SRFI 69 if not for
the following paragraph of SRFI 125 :
;;;
;;; Implementations are permitted to ignore user-specified
;;; hash functions in certain circumstances. Specifically,
;;; if the equality predicate, whether passed as part of a
;;; comparator or explicitly, is more fine-grained (in the
sense of R7RS - small section 6.1 ) than equal ? , the
;;; implementation is free — indeed, is encouraged — to
;;; ignore the user-specified hash function and use something
;;; implementation-dependent. This allows the use of addresses
;;; as hashes, in which case the keys must be rehashed if
;;; they are moved by the garbage collector. Such a hash
;;; function is unsafe to use outside the context of
;;; implementation-provided hash tables. It can of course be
;;; exposed by an implementation as an extension, with
;;; suitable warnings against inappropriate uses.
;;;
;;; That gives implementations permission to do something more
;;; useful, but when should implementations take advantage of
;;; that permission? This implementation uses the superior
solution provided by SRFI 126 whenever :
;;;
A comparator is passed as first argument and its equality
;;; predicate is eq? or eqv?.
;;;
The eq ? or eqv ? procedure is passed as first argument
;;; (which is a deprecated usage).
(define (make-hash-table comparator/equiv . rest)
(if (comparator? comparator/equiv)
(let ((equiv (comparator-equality-predicate comparator/equiv))
(hash-function (%comparator-hash-function comparator/equiv)))
(%make-hash-table equiv hash-function rest))
(let* ((equiv comparator/equiv)
(hash-function (if (and (not (null? rest))
(procedure? (car rest)))
(car rest)
#f))
(rest (if hash-function (cdr rest) rest)))
(issue-warning-deprecated 'srfi-69-style:make-hash-table)
(%make-hash-table equiv hash-function rest))))
(define (%make-hash-table equiv hash-function opts)
(%check-optional-arguments 'make-hash-table opts)
(let ((weakness (%get-hash-table-weakness opts))
(capacity (%get-hash-table-capacity opts)))
Use SRFI : 126 make - hashtable to handle capacity and weakness
(cond ((equal? equiv eq?)
(make-eq-hashtable capacity weakness))
((equal? equiv eqv?)
(make-eqv-hashtable capacity weakness))
(hash-function
(make-hashtable hash-function equiv capacity weakness))
((equal? equiv equal?)
(make-hashtable equal-hash equiv capacity weakness))
((equal? equiv string=?)
(make-hashtable string-hash equiv capacity weakness))
((equal? equiv string-ci=?)
(make-hashtable string-ci-hash equiv capacity weakness))
((equal? equiv symbol=?)
(make-hashtable symbol-hash equiv capacity weakness))
(else
(error "make-hash-table: unable to infer hash function"
equiv)))))
(define (hash-table comparator . rest)
(let ((ht (apply make-hash-table comparator rest)))
(let loop ((kvs rest))
(cond
((null? kvs) #f)
((null? (cdr kvs)) (error "hash-table: wrong number of arguments"))
((hashtable-contains? ht (car kvs))
(error "hash-table: two equivalent keys were provided"
(car kvs)))
(else (hashtable-set! ht (car kvs) (cadr kvs))
(loop (cddr kvs)))))
(hash-table-copy ht #f)))
(define (hash-table-unfold stop? mapper successor seed comparator . rest)
(let ((ht (apply make-hash-table comparator rest)))
(let loop ((seed seed))
(if (stop? seed)
ht
(call-with-values
(lambda () (mapper seed))
(lambda (key val)
(hash-table-set! ht key val)
(loop (successor seed))))))))
(define (alist->hash-table alist comparator/equiv . rest)
(if (and (not (null? rest))
(procedure? (car rest)))
(issue-warning-deprecated 'srfi-69-style:alist->hash-table))
(let ((ht (apply make-hash-table comparator/equiv rest))
(entries (reverse alist)))
(for-each (lambda (entry)
(hash-table-set! ht (car entry) (cdr entry)))
entries)
ht))
;;; Predicates.
;; (define (hash-table? obj)
;; (hashtable? obj))
;; (define (hash-table-contains? ht key)
;; (hashtable-contains? ht key))
;; (define (hash-table-empty? ht)
;; (hashtable-empty? ht))
(define (hash-table=? value-comparator ht1 ht2)
(let ((val=? (comparator-equality-predicate value-comparator))
(n1 (hash-table-size ht1))
(n2 (hash-table-size ht2)))
(and (= n1 n2)
(eq? (hashtable-equivalence-function ht1)
(hashtable-equivalence-function ht2))
(hash-table-every (lambda (key val1)
(and (hash-table-contains? ht2 key)
(val=? val1
(hashtable-ref ht2 key 'ignored))))
ht1))))
(define (hash-table-mutable? ht)
(hashtable-mutable? ht))
;;; Accessors.
(define hash-table-ref
(case-lambda
((ht key) (hashtable-ref ht key))
((ht key failure)
(let ((val (hashtable-ref ht key %not-found)))
(if (eq? val %not-found)
(failure)
val)))
((ht key failure success)
(let ((val (hashtable-ref ht key %not-found)))
(if (eq? val %not-found)
(failure)
(success val))))))
(define (hash-table-ref/default ht key default)
(hashtable-ref ht key default))
;;; Mutators.
(define hash-table-set!
(case-lambda
((ht) #f)
((ht key val) (hashtable-set! ht key val))
((ht key1 val1 key2 val2 . others)
(hashtable-set! ht key1 val1)
(hashtable-set! ht key2 val2)
(apply hash-table-set! ht others))))
(define (hash-table-delete! ht . keys)
(let ((count 0))
(for-each (lambda (key)
(when (hashtable-contains? ht key)
(set! count (fx+ 1 count))
(hashtable-delete! ht key)))
keys)
count))
;; (define (hash-table-intern! ht key failure)
;; (hashtable-intern! ht key failure))
(define hash-table-update!
(case-lambda
((ht key updater)
(hashtable-update! ht key updater))
((ht key updater failure)
(let ((updater* (lambda (val)
(if (eq? %not-found val)
(updater (failure))
(updater val)))))
(hashtable-update! ht key updater* %not-found)))
((ht key updater failure success)
(let* ((updater* (lambda (val)
(if (eq? %not-found val)
(updater (failure))
(success (updater val))))))
(hashtable-update! ht key updater* %not-found)))))
(define (hash-table-update!/default ht key updater default)
(hashtable-update! ht key updater default))
;; (define (hash-table-pop! ht)
;; (hashtable-pop! ht))
;; (define (hash-table-clear! ht)
;; (hashtable-clear! ht))
;;; The whole hash table.
;; (define (hash-table-size ht)
;; (hashtable-size ht))
(define (hash-table-keys ht)
(vector->list (hashtable-keys ht)))
(define (hash-table-values ht)
(vector->list (hashtable-values ht)))
(define (hash-table-entries ht)
(call-with-values
(lambda () (hashtable-entries ht))
(lambda (keys vals)
(values (vector->list keys)
(vector->list vals)))))
(define (hash-table-find proc ht failure)
(call-with-values
(lambda () (hashtable-entries ht))
(lambda (keys vals)
(let ((size (vector-length keys)))
(let loop ((i 0))
(if (fx>=? i size)
(failure)
(let* ((key (vector-ref keys i))
(val (vector-ref vals i))
(x (proc key val)))
(or x (loop (fx+ i 1))))))))))
(define (hash-table-count pred ht)
(let ((count 0))
(call-with-values
(lambda () (hashtable-entries ht))
(lambda (keys vals)
(vector-for-each (lambda (key val)
(if (pred key val) (set! count (fx+ count 1))))
keys vals)))
count))
;;; Mapping and folding.
(define (hash-table-map proc comparator ht)
(let ((result (make-hash-table comparator)))
(hash-table-for-each
(lambda (key val)
(hash-table-set! result key (proc val)))
ht)
result))
(define (hash-table-map->list proc ht)
(call-with-values
(lambda () (hash-table-entries ht))
(lambda (keys vals)
(map proc keys vals))))
;;; With this particular implementation, the proc can safely mutate ht.
;;; That property is not guaranteed by the specification, but can be
;;; relied upon by procedures defined in this file.
(define (hash-table-for-each proc ht)
(hashtable-walk ht proc))
(define (hash-table-map! proc ht)
(hashtable-update-all! ht proc))
(define (hash-table-fold proc init ht)
(if (hashtable? proc)
(deprecated:hash-table-fold proc init ht)
(hashtable-sum ht init proc)))
(define (hash-table-prune! proc ht)
(hashtable-prune! ht proc))
;;; Copying and conversion.
;; (define hash-table-copy hashtable-copy)
(define (hash-table-empty-copy ht)
(let* ((ht2 (hash-table-copy ht #t))
(ignored (hash-table-clear! ht2)))
ht2))
(define (hash-table->alist ht)
(call-with-values
(lambda () (hash-table-entries ht))
(lambda (keys vals)
(map cons keys vals))))
;;; Hash tables as sets.
(define (hash-table-union! ht1 ht2)
(hash-table-for-each
(lambda (key2 val2)
(if (not (hashtable-contains? ht1 key2))
(hashtable-set! ht1 key2 val2)))
ht2)
ht1)
(define (hash-table-intersection! ht1 ht2)
(hash-table-for-each
(lambda (key1 val1)
(if (not (hashtable-contains? ht2 key1))
(hashtable-delete! ht1 key1)))
ht1)
ht1)
(define (hash-table-difference! ht1 ht2)
(hash-table-for-each
(lambda (key1 val1)
(if (hashtable-contains? ht2 key1)
(hashtable-delete! ht1 key1)))
ht1)
ht1)
(define (hash-table-xor! ht1 ht2)
(hash-table-for-each
(lambda (key2 val2)
(if (hashtable-contains? ht1 key2)
(hashtable-delete! ht1 key2)
(hashtable-set! ht1 key2 val2)))
ht2)
ht1)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
The following procedures are deprecated by SRFI 125 , but must
;;; be exported nonetheless.
;;;
Programs that import the ( srfi 125 ) library must rename the
;;; deprecated string-hash and string-ci-hash procedures to avoid
;;; conflict with the string-hash and string-ci-hash procedures
exported by SRFI 126 and SRFI 128 .
;;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define (deprecated:hash obj . rest)
(issue-warning-deprecated 'hash)
(default-hash obj))
(define (deprecated:string-hash obj . rest)
(issue-warning-deprecated 'srfi-125:string-hash)
(string-hash obj))
(define (deprecated:string-ci-hash obj . rest)
(issue-warning-deprecated 'srfi-125:string-ci-hash)
(string-ci-hash obj))
(define (deprecated:hash-by-identity obj . rest)
(issue-warning-deprecated 'hash-by-identity)
(deprecated:hash obj))
(define (deprecated:hash-table-equivalence-function ht)
(issue-warning-deprecated 'hash-table-equivalence-function)
(hashtable-equivalence-function ht))
(define (deprecated:hash-table-hash-function ht)
(issue-warning-deprecated 'hash-table-hash-function)
(hashtable-hash-function ht))
(define (deprecated:hash-table-exists? ht key)
(issue-warning-deprecated 'hash-table-exists?)
(hash-table-contains? ht key))
(define (deprecated:hash-table-walk ht proc)
(issue-warning-deprecated 'hash-table-walk)
(hash-table-for-each proc ht))
(define (deprecated:hash-table-fold ht proc seed)
(issue-warning-deprecated 'srfi-69-style:hash-table-fold)
(hash-table-fold proc seed ht))
(define (deprecated:hash-table-merge! ht1 ht2)
(issue-warning-deprecated 'hash-table-merge!)
(hash-table-union! ht1 ht2))
eof
| null | https://raw.githubusercontent.com/ibawt/chezuv/0075ebe8f2529d916946aaa7e441b5134c4ab0e5/vendor/srfi/%253a125/125.body.scm | scheme |
Permission to copy this software, in whole or in part, to use this
software for any lawful purpose, and to redistribute this software
is granted subject to the restriction that all copies made of this
software must include this copyright and permission notice in full.
I also request that you send me a copy of any improvements that you
make to this software so that they may be incorporated within it to
the benefit of the Scheme community.
Private stuff, not exported.
List of deprecated features for which a warning has already
been issued.
of the hash-table-set! procedure can use to reject invalid keys.
That's hard to do without sacrificing interoperability with R6RS
Full interoperability means the hash tables implemented here are
so any association between a hash table and its comparator would have
to be maintained outside the representation of hash tables themselves,
which is problematic unless weak pointers are available.
Not all of the hash tables implemented here will have comparators
associated with them anyway, because an equivalence procedure
and hash function can be used to create a hash table instead of
type test is to incorporate that test into a hash table's hash
function. The advantage of doing that should be weighed against
these disadvantages:
If the type test is slow, then hashing would also be slower.
a hash function from some hash tables.
Some programmers might expect that hash function to be the
hash function encapsulated by the comparator (in the sense
of eq?, perhaps) even though this API makes no such guarantee
(and extraction of that hash function from an existing hash
table can only be done by calling a deprecated procedure).
If %enforce-comparator-type-tests is true, then make-hash-table,
when passed a comparator, will use a hash function that enforces
the comparator's type test.
Given a comparator, return its hash function, possibly augmented
by the comparator's type test.
A unique (in the sense of eq?) value that will never be found
within a hash-table.
A unique (in the sense of eq?) value that escapes only as an irritant
when a hash-table key is not found.
The error message used when a hash-table key is not found.
and is still used by hash-table=?
Exported procedures
Constructors.
are implementation-dependent, but a non-negative exact integer
should be interpreted as an initial capacity and the symbols
thread-safe, weak-keys, ephemeral-keys, weak-values, and
emphemeral-values should be interpreted specially. (These
special symbols are distinct from the analogous special symbols
a hash function (which is allowed only for the deprecated case
implementation-dependent optional argument, as are all arguments
make-equal-comparator procedures whose hash function is the
for use with eq? and eqv? unless the object being hashed is
define a comparator whose hash function is truly compatible
with the use of eq? or eqv? as an equality predicate.
Implementations are permitted to ignore user-specified
hash functions in certain circumstances. Specifically,
if the equality predicate, whether passed as part of a
comparator or explicitly, is more fine-grained (in the
implementation is free — indeed, is encouraged — to
ignore the user-specified hash function and use something
implementation-dependent. This allows the use of addresses
as hashes, in which case the keys must be rehashed if
they are moved by the garbage collector. Such a hash
function is unsafe to use outside the context of
implementation-provided hash tables. It can of course be
exposed by an implementation as an extension, with
suitable warnings against inappropriate uses.
That gives implementations permission to do something more
useful, but when should implementations take advantage of
that permission? This implementation uses the superior
predicate is eq? or eqv?.
(which is a deprecated usage).
Predicates.
(define (hash-table? obj)
(hashtable? obj))
(define (hash-table-contains? ht key)
(hashtable-contains? ht key))
(define (hash-table-empty? ht)
(hashtable-empty? ht))
Accessors.
Mutators.
(define (hash-table-intern! ht key failure)
(hashtable-intern! ht key failure))
(define (hash-table-pop! ht)
(hashtable-pop! ht))
(define (hash-table-clear! ht)
(hashtable-clear! ht))
The whole hash table.
(define (hash-table-size ht)
(hashtable-size ht))
Mapping and folding.
With this particular implementation, the proc can safely mutate ht.
That property is not guaranteed by the specification, but can be
relied upon by procedures defined in this file.
Copying and conversion.
(define hash-table-copy hashtable-copy)
Hash tables as sets.
be exported nonetheless.
deprecated string-hash and string-ci-hash procedures to avoid
conflict with the string-hash and string-ci-hash procedures
| Copyright 2015 .
Ten of the SRFI 125 procedures are deprecated , and another
two allow alternative arguments that are deprecated .
(define (issue-deprecated-warnings?) #t)
(define (issue-warning-deprecated name-of-deprecated-misfeature)
(if (not (memq name-of-deprecated-misfeature already-warned))
(begin
(set! already-warned
(cons name-of-deprecated-misfeature already-warned))
(if (issue-deprecated-warnings?)
(let ((out (current-error-port)))
(display "WARNING: " out)
(display name-of-deprecated-misfeature out)
(newline out)
(display " is deprecated by SRFI 125. See" out)
(newline out)
(display " " out)
(display url:deprecated out)
(newline out))))))
(define url:deprecated
"-125/srfi-125.html")
(define already-warned '())
Comparators contain a type test predicate , which implementations
and/or SRFI 69 and/or SRFI 126 hash tables .
interchangeable with the SRFI 126 hashtables used to implement them .
SRFI 69 and R6RS and SRFI 126 hashtables do n't contain comparators ,
a comparator ( although that usage is deprecated by SRFI 125 ) .
One way to preserve interoperability while enforcing a comparator 's
The R6RS , SRFI 69 , and SRFI 126 APIs allow extraction of
(define %enforce-comparator-type-tests #t)
(define (%comparator-hash-function comparator)
(let ((okay? (comparator-type-test-predicate comparator))
(hash-function (comparator-hash-function comparator)))
(if %enforce-comparator-type-tests
(lambda (x . rest)
(cond ((not (okay? x))
(error "key rejected by hash-table comparator"
x
comparator))
((null? rest)
(hash-function x))
(else
(apply hash-function x rest))))
hash-function)))
(define %not-found (list '%not-found))
(define %not-found-irritant (list 'not-found))
(define %not-found-message "hash-table key not found")
We let SRFI 126 decide which weakness is supported
(define (%check-optional-arguments procname args)
(if (memq 'thread-safe args)
(error (string-append (symbol->string procname)
": unsupported optional argument(s)")
args)))
(define (%get-hash-table-weakness args)
(cond
((memq 'ephemeral-values args)
(if (or (memq 'ephemeral-keys args)
(memq 'weak-keys args))
'ephemeral-key-and-value
'ephemeral-value))
((memq 'ephemeral-keys args)
(if (memq 'weak-values args)
'ephemeral-key-and-value
'ephemeral-key))
((memq 'weak-keys args)
(if (memq 'weak-values args)
'weak-key-and-value
'weak-key))
((memq 'weak-values args)
'weak-value)
(else #f)))
(define (%get-hash-table-capacity args)
(find fixnum? args))
This was exported by an earlier draft of SRFI 125 ,
(define (hash-table-every proc ht)
(call-with-values
(lambda () (hashtable-entries ht))
(lambda (keys vals)
(let ((size (vector-length keys)))
(let loop ((i 0))
(or (fx>=? i size)
(let* ((key (vector-ref keys i))
(val (vector-ref vals i)))
(and (proc key val)
(loop (fx+ i 1))))))))))
The first argument can be a comparator or an equality predicate .
If the first argument is a comparator , any remaining arguments
in SRFI 126 . )
If the first argument is not a comparator , then it had better
be an equality predicate ( which is deprecated by SRFI 125 ) .
If a second argument is present and is a procedure , then it 's
in which the first argument is an equality predicate ) . If a
second argument is not a procedure , then it 's some kind of
beyond the second .
SRFI 128 defines make - eq - comparator , make - eqv - comparator , and
default - hash procedure of SRFI 128 , which is inappropriate
never mutated . Neither SRFI 125 nor 128 provide any way to
That would make SRFI 125 almost as bad as SRFI 69 if not for
the following paragraph of SRFI 125 :
sense of R7RS - small section 6.1 ) than equal ? , the
solution provided by SRFI 126 whenever :
A comparator is passed as first argument and its equality
The eq ? or eqv ? procedure is passed as first argument
;; Public constructor.  With a comparator first argument, its equality
;; predicate and hash function are extracted and the remaining arguments
;; are options.  Otherwise the first argument is treated as a bare
;; equality predicate (deprecated SRFI-69 style), optionally followed by
;; a hash function; a deprecation warning is issued in that case.
(define (make-hash-table comparator/equiv . rest)
  (if (comparator? comparator/equiv)
      (let ((equiv (comparator-equality-predicate comparator/equiv))
            (hash-function (%comparator-hash-function comparator/equiv)))
        (%make-hash-table equiv hash-function rest))
      (let* ((equiv comparator/equiv)
             (hash-function (if (and (not (null? rest))
                                     (procedure? (car rest)))
                                (car rest)
                                #f))
             (rest (if hash-function (cdr rest) rest)))
        (issue-warning-deprecated 'srfi-69-style:make-hash-table)
        (%make-hash-table equiv hash-function rest))))
;; Internal constructor shared by make-hash-table.  Validates the option
;; list, extracts weakness/capacity options, and dispatches to the
;; appropriate SRFI 126 constructor.  When no hash function is supplied,
;; one is inferred from a recognised equality predicate; otherwise an
;; error is raised.
;; Fix: the explanatory line below had lost its comment marker and sat
;; as bare prose inside the let body, breaking the definition.
(define (%make-hash-table equiv hash-function opts)
  (%check-optional-arguments 'make-hash-table opts)
  (let ((weakness (%get-hash-table-weakness opts))
        (capacity (%get-hash-table-capacity opts)))
    ;; Use SRFI 126 make-hashtable to handle capacity and weakness.
    (cond ((equal? equiv eq?)
           (make-eq-hashtable capacity weakness))
          ((equal? equiv eqv?)
           (make-eqv-hashtable capacity weakness))
          (hash-function
           (make-hashtable hash-function equiv capacity weakness))
          ((equal? equiv equal?)
           (make-hashtable equal-hash equiv capacity weakness))
          ((equal? equiv string=?)
           (make-hashtable string-hash equiv capacity weakness))
          ((equal? equiv string-ci=?)
           (make-hashtable string-ci-hash equiv capacity weakness))
          ((equal? equiv symbol=?)
           (make-hashtable symbol-hash equiv capacity weakness))
          (else
           (error "make-hash-table: unable to infer hash function"
                  equiv)))))
;; (hash-table comparator key1 val1 key2 val2 ...) -> immutable table.
;; Odd argument counts and duplicate (equivalent) keys are errors; the
;; result is an immutable copy (hash-table-copy with mutability #f).
;; NOTE(review): 'rest' is passed both to make-hash-table (as options)
;; and then consumed as key/value pairs, mirroring the SRFI 125 sample
;; implementation -- confirm %check-optional-arguments tolerates this.
(define (hash-table comparator . rest)
  (let ((ht (apply make-hash-table comparator rest)))
    (let loop ((kvs rest))
      (cond
        ((null? kvs) #f)
        ((null? (cdr kvs)) (error "hash-table: wrong number of arguments"))
        ((hashtable-contains? ht (car kvs))
         (error "hash-table: two equivalent keys were provided"
                (car kvs)))
        (else (hashtable-set! ht (car kvs) (cadr kvs))
              (loop (cddr kvs)))))
    (hash-table-copy ht #f)))
;; Build a table by iteration: starting from seed, stop when (stop? seed)
;; is true; otherwise (mapper seed) yields a key and value to insert and
;; (successor seed) produces the next seed.
(define (hash-table-unfold stop? mapper successor seed comparator . rest)
  (let ((ht (apply make-hash-table comparator rest)))
    (let loop ((seed seed))
      (if (stop? seed)
          ht
          (call-with-values
            (lambda () (mapper seed))
            (lambda (key val)
              (hash-table-set! ht key val)
              (loop (successor seed))))))))
;; Convert an alist to a hash table.  The alist is reversed before
;; insertion so that associations earlier in the list take precedence,
;; as SRFI 125 requires.  A procedure second argument selects the
;; deprecated SRFI-69 calling convention and triggers a warning.
(define (alist->hash-table alist comparator/equiv . rest)
  (if (and (not (null? rest))
           (procedure? (car rest)))
      (issue-warning-deprecated 'srfi-69-style:alist->hash-table))
  (let ((ht (apply make-hash-table comparator/equiv rest))
        (entries (reverse alist)))
    (for-each (lambda (entry)
                (hash-table-set! ht (car entry) (cdr entry)))
              entries)
    ht))
;; Structural equality of two tables: same size, same (eq?) equivalence
;; function, and every key of ht1 maps to a val=?-equal value in ht2.
;; The 'ignored default passed to hashtable-ref is unreachable because
;; hash-table-contains? is checked first.
(define (hash-table=? value-comparator ht1 ht2)
  (let ((val=? (comparator-equality-predicate value-comparator))
        (n1 (hash-table-size ht1))
        (n2 (hash-table-size ht2)))
    (and (= n1 n2)
         (eq? (hashtable-equivalence-function ht1)
              (hashtable-equivalence-function ht2))
         (hash-table-every (lambda (key val1)
                             (and (hash-table-contains? ht2 key)
                                  (val=? val1
                                         (hashtable-ref ht2 key 'ignored))))
                           ht1))))
;; True iff ht accepts mutating operations; delegates to SRFI 126.
(define (hash-table-mutable? ht)
  (hashtable-mutable? ht))
;; (hash-table-ref ht key [failure [success]]).
;; Without handlers this delegates to hashtable-ref, which errors on a
;; missing key.  With handlers, the private sentinel %not-found
;; distinguishes "key absent" (call the failure thunk) from a stored
;; value (optionally passed through success).
(define hash-table-ref
  (case-lambda
    ((ht key) (hashtable-ref ht key))
    ((ht key failure)
     (let ((val (hashtable-ref ht key %not-found)))
       (if (eq? val %not-found)
           (failure)
           val)))
    ((ht key failure success)
     (let ((val (hashtable-ref ht key %not-found)))
       (if (eq? val %not-found)
           (failure)
           (success val))))))
;; Return the value for key, or default when absent.
(define (hash-table-ref/default ht key default)
  (hashtable-ref ht key default))
;; Store zero or more key/value pairs.  The one-argument form is a
;; no-op; pairs are consumed two at a time.  An odd number of pairs
;; eventually leaves (ht key), which matches no clause and errors.
(define hash-table-set!
  (case-lambda
    ((ht) #f)
    ((ht key val) (hashtable-set! ht key val))
    ((ht key1 val1 key2 val2 . others)
     (hashtable-set! ht key1 val1)
     (hashtable-set! ht key2 val2)
     (apply hash-table-set! ht others))))
;; Delete each given key from ht, returning how many were present.
(define (hash-table-delete! ht . keys)
  (let ((count 0))
    (for-each (lambda (key)
                (when (hashtable-contains? ht key)
                  (set! count (fx+ 1 count))
                  (hashtable-delete! ht key)))
              keys)
    count))
;; (hash-table-update! ht key updater [failure [success]]).
;; SRFI 125 defines this as
;;   (hash-table-set! ht key (updater (hash-table-ref ht key failure success)))
;; so when the key is present and a success handler is supplied, the new
;; value must be (updater (success val)).
;; Fix: the five-argument clause previously stored (success (updater val)),
;; composing the two procedures in the wrong order.
(define hash-table-update!
  (case-lambda
    ((ht key updater)
     (hashtable-update! ht key updater))
    ((ht key updater failure)
     (let ((updater* (lambda (val)
                       (if (eq? %not-found val)
                           (updater (failure))
                           (updater val)))))
       (hashtable-update! ht key updater* %not-found)))
    ((ht key updater failure success)
     (let* ((updater* (lambda (val)
                        (if (eq? %not-found val)
                            (updater (failure))
                            (updater (success val))))))
       (hashtable-update! ht key updater* %not-found)))))
;; Update key's value with updater, treating a missing key as default.
(define (hash-table-update!/default ht key updater default)
  (hashtable-update! ht key updater default))
;; All keys of ht as a list, in unspecified order.
(define (hash-table-keys ht)
  (vector->list (hashtable-keys ht)))
;; All values of ht as a list, in unspecified order.
(define (hash-table-values ht)
  (vector->list (hashtable-values ht)))
;; Two values: the list of keys and the list of values, in matching
;; (but otherwise unspecified) order.
(define (hash-table-entries ht)
  (call-with-values
    (lambda () (hashtable-entries ht))
    (lambda (keys vals)
      (values (vector->list keys)
              (vector->list vals)))))
;; Apply proc to associations in unspecified order; return the first
;; truthy result, or (failure) when proc returns #f for all of them.
(define (hash-table-find proc ht failure)
  (call-with-values
    (lambda () (hashtable-entries ht))
    (lambda (keys vals)
      (let ((size (vector-length keys)))
        (let loop ((i 0))
          (if (fx>=? i size)
              (failure)
              (let* ((key (vector-ref keys i))
                     (val (vector-ref vals i))
                     (x (proc key val)))
                (or x (loop (fx+ i 1))))))))))
;; Number of associations satisfying (pred key val).  Uses the R6RS
;; multi-vector vector-for-each over the parallel key/value vectors.
(define (hash-table-count pred ht)
  (let ((count 0))
    (call-with-values
      (lambda () (hashtable-entries ht))
      (lambda (keys vals)
        (vector-for-each (lambda (key val)
                           (if (pred key val) (set! count (fx+ count 1))))
                         keys vals)))
    count))
;; New table under 'comparator' with the same keys and values (proc val).
;; Per SRFI 125, proc is applied to values only; keys carry over.
(define (hash-table-map proc comparator ht)
  (let ((result (make-hash-table comparator)))
    (hash-table-for-each
      (lambda (key val)
        (hash-table-set! result key (proc val)))
      ht)
    result))
;; List of (proc key val) over all associations, in unspecified order.
(define (hash-table-map->list proc ht)
  (call-with-values
    (lambda () (hash-table-entries ht))
    (lambda (keys vals)
      (map proc keys vals))))
;; Apply (proc key val) to each association for effect.
(define (hash-table-for-each proc ht)
  (hashtable-walk ht proc))
;; Replace each value in place with (proc key val).
(define (hash-table-map! proc ht)
  (hashtable-update-all! ht proc))
;; Fold proc over all associations.  The deprecated SRFI-69 argument
;; order (table first) is detected by checking whether 'proc' is
;; actually a hashtable, and re-dispatched with a warning.
(define (hash-table-fold proc init ht)
  (if (hashtable? proc)
      (deprecated:hash-table-fold proc init ht)
      (hashtable-sum ht init proc)))
;; Delete every association satisfying (proc key val).
(define (hash-table-prune! proc ht)
  (hashtable-prune! ht proc))
;; A new empty mutable table with ht's comparator: make a mutable copy
;; (the #t flag), then clear it.
(define (hash-table-empty-copy ht)
  (let* ((ht2 (hash-table-copy ht #t))
         (ignored (hash-table-clear! ht2)))
    ht2))
;; Association list of (key . val) pairs, in unspecified order.
(define (hash-table->alist ht)
  (call-with-values
    (lambda () (hash-table-entries ht))
    (lambda (keys vals)
      (map cons keys vals))))
;; Add to ht1 every association of ht2 whose key is absent from ht1
;; (ht1's existing values win).  Returns ht1.
(define (hash-table-union! ht1 ht2)
  (hash-table-for-each
    (lambda (key2 val2)
      (if (not (hashtable-contains? ht1 key2))
          (hashtable-set! ht1 key2 val2)))
    ht2)
  ht1)
;; Remove from ht1 every key not present in ht2.  Returns ht1.
;; NOTE(review): deletes from ht1 while walking ht1; this is safe only
;; if hashtable-walk iterates over a snapshot -- confirm in the SRFI 126
;; layer.
(define (hash-table-intersection! ht1 ht2)
  (hash-table-for-each
    (lambda (key1 val1)
      (if (not (hashtable-contains? ht2 key1))
          (hashtable-delete! ht1 key1)))
    ht1)
  ht1)
;; Remove from ht1 every key that IS present in ht2.  Returns ht1.
;; NOTE(review): same walk-while-mutating caveat as intersection!.
(define (hash-table-difference! ht1 ht2)
  (hash-table-for-each
    (lambda (key1 val1)
      (if (hashtable-contains? ht2 key1)
          (hashtable-delete! ht1 key1)))
    ht1)
  ht1)
;; Symmetric difference into ht1: keys in both tables are removed from
;; ht1; keys only in ht2 are copied into ht1.  Walks ht2, so mutating
;; ht1 during the walk is safe.  Returns ht1.
(define (hash-table-xor! ht1 ht2)
  (hash-table-for-each
    (lambda (key2 val2)
      (if (hashtable-contains? ht1 key2)
          (hashtable-delete! ht1 key2)
          (hashtable-set! ht1 key2 val2)))
    ht2)
  ht1)
;; The following procedures are deprecated by SRFI 125, but must
;; still be exported.  Programs that import the (srfi 125) library must
;; rename the like-named procedures exported by SRFI 126 and SRFI 128.
;; Deprecation shims: each issues a warning, then delegates to the
;; modern equivalent.  The -walk and -fold shims also flip from the
;; old SRFI-69 argument order (table first) to the new order.
(define (deprecated:hash obj . rest)
  (issue-warning-deprecated 'hash)
  (default-hash obj))
(define (deprecated:string-hash obj . rest)
  (issue-warning-deprecated 'srfi-125:string-hash)
  (string-hash obj))
(define (deprecated:string-ci-hash obj . rest)
  (issue-warning-deprecated 'srfi-125:string-ci-hash)
  (string-ci-hash obj))
(define (deprecated:hash-by-identity obj . rest)
  (issue-warning-deprecated 'hash-by-identity)
  (deprecated:hash obj))
(define (deprecated:hash-table-equivalence-function ht)
  (issue-warning-deprecated 'hash-table-equivalence-function)
  (hashtable-equivalence-function ht))
(define (deprecated:hash-table-hash-function ht)
  (issue-warning-deprecated 'hash-table-hash-function)
  (hashtable-hash-function ht))
(define (deprecated:hash-table-exists? ht key)
  (issue-warning-deprecated 'hash-table-exists?)
  (hash-table-contains? ht key))
(define (deprecated:hash-table-walk ht proc)
  (issue-warning-deprecated 'hash-table-walk)
  (hash-table-for-each proc ht))
(define (deprecated:hash-table-fold ht proc seed)
  (issue-warning-deprecated 'srfi-69-style:hash-table-fold)
  (hash-table-fold proc seed ht))
(define (deprecated:hash-table-merge! ht1 ht2)
  (issue-warning-deprecated 'hash-table-merge!)
  (hash-table-union! ht1 ht2))
eof
|
d4f6c65a9b49e27c9959bf766f95875c913ea92be4da401e85c05ebd2591f1a5 | fosskers/aura | Dependencies.hs | # LANGUAGE DeriveGeneric #
{-# LANGUAGE LambdaCase #-}
# LANGUAGE TupleSections #
-- |
Module : Aura . Dependencies
Copyright : ( c ) , 2012 - 2021
-- License : GPL3
Maintainer : < >
--
-- Library for handling package dependencies and version conflicts.
module Aura.Dependencies ( resolveDeps ) where
import Algebra.Graph.AdjacencyMap
import Algebra.Graph.AdjacencyMap.Algorithm (scc)
import qualified Algebra.Graph.NonEmpty.AdjacencyMap as NAM
import Algebra.Graph.ToGraph (isAcyclic)
import Aura.Core
import Aura.IO
import Aura.Languages
import Aura.Settings
import Aura.Types
import Aura.Utils
import Data.Versions hiding (Lens')
import RIO
import RIO.Lens (each)
import qualified RIO.Map as M
import qualified RIO.NonEmpty as NEL
import qualified RIO.Set as S
import qualified RIO.Text as T
---
-- | The results of dependency resolution.
data Resolution = Resolution
{ toInstall :: !(Map PkgName Package)
, satisfied :: !(Set PkgName) }
deriving (Generic)
toInstallL :: Lens' Resolution (Map PkgName Package)
toInstallL f r = (\m -> r { toInstall = m }) <$> f (toInstall r)
satisfiedL :: Lens' Resolution (Set PkgName)
satisfiedL f r = (\s -> r { satisfied = s }) <$> f (satisfied r)
-- | Given some `Package`s, determine its full dependency graph.
-- The graph is collapsed into layers of packages which are not
-- interdependent, and thus can be built and installed as a group.
--
-- Deeper layers of the result list (generally) depend on the previous layers.
resolveDeps :: Repository -> NonEmpty Package -> RIO Env (NonEmpty (NonEmpty Package))
resolveDeps repo ps = do
logDebug "resolveDeps: Entered."
ss <- asks settings
res <- liftIO (Right <$> resolveDeps' ss repo ps) `catchAny` handleError
Resolution m s <- either throwM pure res
logDebug "resolveDeps: Successful recursive dep lookup."
unless (length ps == length m) $ putText "\n"
let de = conflicts ss m s
unless (null de) . throwM . Failure . FailMsg $ missingPkg_2 de
either throwM pure $ sortInstall m
where
handleError :: SomeException -> RIO Env (Either Failure a)
handleError e = pure . Left . Failure . FailMsg . dependencyLookup_1 $ tshow e
| Solve dependencies for a set of ` assumed to not be
-- installed/satisfied.
resolveDeps' :: Settings -> Repository -> NonEmpty Package -> IO Resolution
resolveDeps' ss repo ps = resolve (Resolution mempty mempty) ps
where
-- | Only searches for packages that we haven't checked yet.
resolve :: Resolution -> NonEmpty Package -> IO Resolution
resolve r@(Resolution m _) xs = maybe' (pure r) (NEL.nonEmpty goods) $ \goods' -> do
let m' = M.fromList . map (pname &&& id) $ toList goods'
r' = r & toInstallL %~ (<> m')
these (const $ pure r') (satisfy r') (const $ satisfy r') $ dividePkgs goods'
where
goods :: [Package]
goods = NEL.filter (\p -> not $ pname p `M.member` m) xs
-- | All dependencies from all potential `Buildable`s.
allDeps :: NonEmpty Buildable -> Set Dep
allDeps = foldMap1 (S.fromList . (^.. to bDeps . each))
| which are not yet queued for install .
freshDeps :: Resolution -> Set Dep -> Set Dep
freshDeps (Resolution m s) = S.filter f
where
f :: Dep -> Bool
f d = let n = dName d in not $ M.member n m || S.member n s
| Consider only " unsatisfied " .
satisfy :: Resolution -> NonEmpty Buildable -> IO Resolution
satisfy r bs = maybe' (pure r) (nes . freshDeps r $ allDeps bs) $
areSatisfied (envOf ss) >=> these (lookups r) (pure . r') (\uns sat -> lookups (r' sat) uns)
where
r' :: Satisfied -> Resolution
r' (Satisfied sat) = r & satisfiedL %~ (<> f sat)
-- | Unique names of some dependencies.
f :: NonEmpty Dep -> Set PkgName
f = S.fromList . NEL.toList . NEL.map dName
| Lookup unsatisfied and recurse the entire lookup process .
lookups :: Resolution -> Unsatisfied -> IO Resolution
lookups r (Unsatisfied ds) = do
let names = NEL.map dName ds
repoLookup repo ss names >>= \case
Nothing -> throwString "Unexpected AUR Connection Error"
Just (bads, could) -> case nes could of
Nothing -> do
let badNames = unwords . map (T.unpack . pnName) $ S.toList bads
throwString $ "Non-existant deps: " <> badNames
Just goods -> resolve r goods
conflicts :: Settings -> Map PkgName Package -> Set PkgName -> [DepError]
conflicts ss m s = foldMap f m
where
pm :: Map PkgName Package
pm = M.fromList $ map (\p -> (provides $ pprov p, p)) $ toList m
f :: Package -> [DepError]
f (FromRepo _) = []
f (FromAUR b) = flip mapMaybe (bDeps b) $ \d ->
let dn = dName d
Do n't do conflict checks for which are known to be satisfied on
-- the system.
in if S.member dn s then Nothing
else case M.lookup dn m <|> M.lookup dn pm of
Nothing -> Just . NonExistant dn $ bName b
Just p -> realPkgConflicts ss (bName b) p d
sortInstall :: Map PkgName Package -> Either Failure (NonEmpty (NonEmpty Package))
sortInstall m = case cycles depGraph of
[] -> note (Failure $ FailMsg missingPkg_3) . NEL.nonEmpty . mapMaybe nes $ batch depGraph
cs -> Left . Failure . FailMsg . missingPkg_4 $ map (NEL.map pname . NAM.vertexList1) cs
where
f :: Package -> [(Package, Package)]
f (FromRepo _) = []
f p@(FromAUR b) = mapMaybe (\d -> fmap (p,) $ dName d `M.lookup` m)
TODO handle " provides " ?
depGraph = overlay connected singles
elems = M.elems m
connected = edges $ foldMap f elems
singles = overlays $ map vertex elems
cycles :: Ord a => AdjacencyMap a -> [NAM.AdjacencyMap a]
cycles = filter (not . isAcyclic) . vertexList . scc
-- | Find the vertices that have no dependencies.
-- O(n) complexity.
leaves :: Ord a => AdjacencyMap a -> Set a
leaves x = S.filter (null . flip postSet x) $ vertexSet x
-- | Split a graph into batches of mutually independent vertices.
-- Probably O(m * n * log(n)) complexity.
batch :: Ord a => AdjacencyMap a -> [Set a]
-- NOTE(review): termination relies on every nonempty subgraph having a
-- leaf, i.e. the graph being acyclic; sortInstall filters cycles before
-- calling this -- confirm no other caller passes a cyclic graph.
batch g | isEmpty g = []
        | otherwise = ls : batch (induce (`S.notMember` ls) g)
  where ls = leaves g
-- | Questions to be answered in conflict checks:
1 . Is the package ignored in ` pacman.conf ` ?
2 . Is the version requested different from the one provided by
-- the most recent version?
-- Decide whether dependency 'dep' of 'parent' conflicts with candidate
-- package 'pkg': either the package is in pacman's ignore list, or the
-- demanded version is incompatible with the available one.  Release
-- components are stripped from both versions before comparison.
realPkgConflicts :: Settings -> PkgName -> Package -> Dep -> Maybe DepError
realPkgConflicts ss parent pkg dep
  | pn `elem` toIgnore              = Just $ Ignored failMsg1
  | isVersionConflict reqVer curVer = Just $ VerConflict failMsg2
  | otherwise                       = Nothing
  where pn       = pname pkg
        curVer   = pver pkg & release .~ []
        reqVer   = dDemand dep & _VersionDemand . release .~ []
        lang     = langOf ss
        toIgnore = ignoresOf ss
        failMsg1 = getRealPkgConflicts_2 pn lang
        failMsg2 = getRealPkgConflicts_1 parent pn (prettyV curVer) (T.pack $ show reqVer) lang
| Compares a ( r)equested version number with a ( c)urrent up - to - date one .
The ` MustBe ` case uses regexes . A dependency demanding version 7.4
SHOULD match as ` okay ` against version 7.4 , 7.4.0.1 , or even 7.4.0.1 - 2 .
-- True when candidate version 'c' violates the demand.
-- NOTE(review): the MustBe case is an exact (/=) comparison here, while
-- the surrounding commentary describes regex-style prefix matching for
-- MustBe -- confirm which semantics are intended.
isVersionConflict :: VersionDemand -> Versioning -> Bool
isVersionConflict Anything     _ = False
isVersionConflict (LessThan r) c = c >= r
isVersionConflict (MoreThan r) c = c <= r
isVersionConflict (MustBe r)   c = c /= r
isVersionConflict (AtLeast r)  c = c < r
| null | https://raw.githubusercontent.com/fosskers/aura/08cd46eaa598094f7395455d66690d3d8c59e965/haskell/aura/lib/Aura/Dependencies.hs | haskell | # LANGUAGE LambdaCase #
|
License : GPL3
Library for handling package dependencies and version conflicts.
-
| The results of dependency resolution.
| Given some `Package`s, determine its full dependency graph.
The graph is collapsed into layers of packages which are not
interdependent, and thus can be built and installed as a group.
Deeper layers of the result list (generally) depend on the previous layers.
installed/satisfied.
| Only searches for packages that we haven't checked yet.
| All dependencies from all potential `Buildable`s.
| Unique names of some dependencies.
the system.
| Find the vertices that have no dependencies.
O(n) complexity.
| Split a graph into batches of mutually independent vertices.
Probably O(m * n * log(n)) complexity.
| Questions to be answered in conflict checks:
the most recent version? | # LANGUAGE DeriveGeneric #
# LANGUAGE TupleSections #
Module : Aura . Dependencies
Copyright : ( c ) , 2012 - 2021
Maintainer : < >
module Aura.Dependencies ( resolveDeps ) where
import Algebra.Graph.AdjacencyMap
import Algebra.Graph.AdjacencyMap.Algorithm (scc)
import qualified Algebra.Graph.NonEmpty.AdjacencyMap as NAM
import Algebra.Graph.ToGraph (isAcyclic)
import Aura.Core
import Aura.IO
import Aura.Languages
import Aura.Settings
import Aura.Types
import Aura.Utils
import Data.Versions hiding (Lens')
import RIO
import RIO.Lens (each)
import qualified RIO.Map as M
import qualified RIO.NonEmpty as NEL
import qualified RIO.Set as S
import qualified RIO.Text as T
data Resolution = Resolution
{ toInstall :: !(Map PkgName Package)
, satisfied :: !(Set PkgName) }
deriving (Generic)
toInstallL :: Lens' Resolution (Map PkgName Package)
toInstallL f r = (\m -> r { toInstall = m }) <$> f (toInstall r)
satisfiedL :: Lens' Resolution (Set PkgName)
satisfiedL f r = (\s -> r { satisfied = s }) <$> f (satisfied r)
resolveDeps :: Repository -> NonEmpty Package -> RIO Env (NonEmpty (NonEmpty Package))
resolveDeps repo ps = do
logDebug "resolveDeps: Entered."
ss <- asks settings
res <- liftIO (Right <$> resolveDeps' ss repo ps) `catchAny` handleError
Resolution m s <- either throwM pure res
logDebug "resolveDeps: Successful recursive dep lookup."
unless (length ps == length m) $ putText "\n"
let de = conflicts ss m s
unless (null de) . throwM . Failure . FailMsg $ missingPkg_2 de
either throwM pure $ sortInstall m
where
handleError :: SomeException -> RIO Env (Either Failure a)
handleError e = pure . Left . Failure . FailMsg . dependencyLookup_1 $ tshow e
| Solve dependencies for a set of ` assumed to not be
resolveDeps' :: Settings -> Repository -> NonEmpty Package -> IO Resolution
resolveDeps' ss repo ps = resolve (Resolution mempty mempty) ps
where
resolve :: Resolution -> NonEmpty Package -> IO Resolution
resolve r@(Resolution m _) xs = maybe' (pure r) (NEL.nonEmpty goods) $ \goods' -> do
let m' = M.fromList . map (pname &&& id) $ toList goods'
r' = r & toInstallL %~ (<> m')
these (const $ pure r') (satisfy r') (const $ satisfy r') $ dividePkgs goods'
where
goods :: [Package]
goods = NEL.filter (\p -> not $ pname p `M.member` m) xs
allDeps :: NonEmpty Buildable -> Set Dep
allDeps = foldMap1 (S.fromList . (^.. to bDeps . each))
| which are not yet queued for install .
freshDeps :: Resolution -> Set Dep -> Set Dep
freshDeps (Resolution m s) = S.filter f
where
f :: Dep -> Bool
f d = let n = dName d in not $ M.member n m || S.member n s
| Consider only " unsatisfied " .
satisfy :: Resolution -> NonEmpty Buildable -> IO Resolution
satisfy r bs = maybe' (pure r) (nes . freshDeps r $ allDeps bs) $
areSatisfied (envOf ss) >=> these (lookups r) (pure . r') (\uns sat -> lookups (r' sat) uns)
where
r' :: Satisfied -> Resolution
r' (Satisfied sat) = r & satisfiedL %~ (<> f sat)
f :: NonEmpty Dep -> Set PkgName
f = S.fromList . NEL.toList . NEL.map dName
| Lookup unsatisfied and recurse the entire lookup process .
lookups :: Resolution -> Unsatisfied -> IO Resolution
lookups r (Unsatisfied ds) = do
let names = NEL.map dName ds
repoLookup repo ss names >>= \case
Nothing -> throwString "Unexpected AUR Connection Error"
Just (bads, could) -> case nes could of
Nothing -> do
let badNames = unwords . map (T.unpack . pnName) $ S.toList bads
throwString $ "Non-existant deps: " <> badNames
Just goods -> resolve r goods
conflicts :: Settings -> Map PkgName Package -> Set PkgName -> [DepError]
conflicts ss m s = foldMap f m
where
pm :: Map PkgName Package
pm = M.fromList $ map (\p -> (provides $ pprov p, p)) $ toList m
f :: Package -> [DepError]
f (FromRepo _) = []
f (FromAUR b) = flip mapMaybe (bDeps b) $ \d ->
let dn = dName d
Do n't do conflict checks for which are known to be satisfied on
in if S.member dn s then Nothing
else case M.lookup dn m <|> M.lookup dn pm of
Nothing -> Just . NonExistant dn $ bName b
Just p -> realPkgConflicts ss (bName b) p d
sortInstall :: Map PkgName Package -> Either Failure (NonEmpty (NonEmpty Package))
sortInstall m = case cycles depGraph of
[] -> note (Failure $ FailMsg missingPkg_3) . NEL.nonEmpty . mapMaybe nes $ batch depGraph
cs -> Left . Failure . FailMsg . missingPkg_4 $ map (NEL.map pname . NAM.vertexList1) cs
where
f :: Package -> [(Package, Package)]
f (FromRepo _) = []
f p@(FromAUR b) = mapMaybe (\d -> fmap (p,) $ dName d `M.lookup` m)
TODO handle " provides " ?
depGraph = overlay connected singles
elems = M.elems m
connected = edges $ foldMap f elems
singles = overlays $ map vertex elems
cycles :: Ord a => AdjacencyMap a -> [NAM.AdjacencyMap a]
cycles = filter (not . isAcyclic) . vertexList . scc
leaves :: Ord a => AdjacencyMap a -> Set a
leaves x = S.filter (null . flip postSet x) $ vertexSet x
batch :: Ord a => AdjacencyMap a -> [Set a]
batch g | isEmpty g = []
| otherwise = ls : batch (induce (`S.notMember` ls) g)
where ls = leaves g
1 . Is the package ignored in ` pacman.conf ` ?
2 . Is the version requested different from the one provided by
realPkgConflicts :: Settings -> PkgName -> Package -> Dep -> Maybe DepError
realPkgConflicts ss parent pkg dep
| pn `elem` toIgnore = Just $ Ignored failMsg1
| isVersionConflict reqVer curVer = Just $ VerConflict failMsg2
| otherwise = Nothing
where pn = pname pkg
curVer = pver pkg & release .~ []
reqVer = dDemand dep & _VersionDemand . release .~ []
lang = langOf ss
toIgnore = ignoresOf ss
failMsg1 = getRealPkgConflicts_2 pn lang
failMsg2 = getRealPkgConflicts_1 parent pn (prettyV curVer) (T.pack $ show reqVer) lang
| Compares a ( r)equested version number with a ( c)urrent up - to - date one .
The ` MustBe ` case uses regexes . A dependency demanding version 7.4
SHOULD match as ` okay ` against version 7.4 , 7.4.0.1 , or even 7.4.0.1 - 2 .
isVersionConflict :: VersionDemand -> Versioning -> Bool
isVersionConflict Anything _ = False
isVersionConflict (LessThan r) c = c >= r
isVersionConflict (MoreThan r) c = c <= r
isVersionConflict (MustBe r) c = c /= r
isVersionConflict (AtLeast r) c = c < r
|
e2a711517416f5c96ce8ddf9ce827572232fd7046773fdd891a3b7f1efa76304 | well-typed/large-records | Zipping.hs | {-# LANGUAGE ConstraintKinds #-}
# LANGUAGE DataKinds #
{-# LANGUAGE DeriveAnyClass #-}
# LANGUAGE DeriveGeneric #
# LANGUAGE DerivingStrategies #
# LANGUAGE ExistentialQuantification #
# LANGUAGE FlexibleContexts #
# LANGUAGE FlexibleInstances #
{-# LANGUAGE KindSignatures #-}
# LANGUAGE MultiParamTypeClasses #
{-# LANGUAGE ScopedTypeVariables #-}
# LANGUAGE StandaloneDeriving #
# LANGUAGE TypeApplications #
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
{-# OPTIONS_GHC -fplugin=Data.Record.Plugin.WithRDP #-}
module Test.Record.Beam.Zipping (tests) where
import Data.Functor.Identity
import Data.Kind
import Data.Record.Plugin
import Database.Beam
import Database.Beam.Schema.Tables
import Test.Tasty
import Test.Tasty.HUnit
import qualified GHC.Generics as GHC
import Data.Record.Beam ()
# ANN type largeRecord #
data TableA (f :: Type -> Type) = TableA {
taPrim :: PrimaryKey TableA f
, taField :: Columnar f Bool
, taMixin :: TableB f
}
deriving (Show, Eq)
deriving anyclass (Beamable)
# ANN type TableB largeRecord #
data TableB (f :: Type -> Type) = TableB {
tbField :: Columnar f Char
}
deriving (Show, Eq)
deriving anyclass (Beamable)
instance Table TableA where
data PrimaryKey TableA f = PrimA (Columnar f Int)
deriving stock (GHC.Generic)
deriving anyclass (Beamable)
primaryKey ta = ta.taPrim
deriving instance Show (Columnar f Int) => Show (PrimaryKey TableA f)
deriving instance Eq (Columnar f Int) => Eq (PrimaryKey TableA f)
tests :: TestTree
tests = testGroup "Test.Record.Beam.Zipping" [
testCase "zipBeamFields" test_zipBeamFields
]
test_zipBeamFields :: Assertion
test_zipBeamFields =
assertEqual "" (runIdentity (zipBeamFieldsM apply fnA argA)) resA
where
apply :: forall a.
Columnar' EndoFn a
-> Columnar' Identity a
-> Identity (Columnar' Identity a)
apply (Columnar' (EndoFn f)) (Columnar' x) = Identity (Columnar' (f x))
fnA :: TableA EndoFn
fnA = TableA {
taPrim = PrimA (EndoFn succ)
, taField = EndoFn not
, taMixin = fnB
}
fnB :: TableB EndoFn
fnB = TableB {
tbField = EndoFn pred
}
argA :: TableA Identity
argA = TableA {
taPrim = PrimA 5
, taField = True
, taMixin = argB
}
argB :: TableB Identity
argB = TableB {
tbField = 'y'
}
resA :: TableA Identity
resA = TableA {
taPrim = PrimA 6
, taField = False
, taMixin = resB
}
resB :: TableB Identity
resB = TableB {
tbField = 'x'
}
newtype EndoFn a = EndoFn (a -> a)
| null | https://raw.githubusercontent.com/well-typed/large-records/fb983aa136c2602499c2421323bd52b6a54b7c9a/beam-large-records/test/Test/Record/Beam/Zipping.hs | haskell | # LANGUAGE ConstraintKinds #
# LANGUAGE DeriveAnyClass #
# LANGUAGE KindSignatures #
# LANGUAGE ScopedTypeVariables #
# LANGUAGE TypeFamilies #
# LANGUAGE TypeOperators #
# LANGUAGE UndecidableInstances #
# OPTIONS_GHC -fplugin=Data.Record.Plugin.WithRDP # | # LANGUAGE DataKinds #
# LANGUAGE DeriveGeneric #
# LANGUAGE DerivingStrategies #
# LANGUAGE ExistentialQuantification #
# LANGUAGE FlexibleContexts #
# LANGUAGE FlexibleInstances #
# LANGUAGE MultiParamTypeClasses #
# LANGUAGE StandaloneDeriving #
# LANGUAGE TypeApplications #
module Test.Record.Beam.Zipping (tests) where
import Data.Functor.Identity
import Data.Kind
import Data.Record.Plugin
import Database.Beam
import Database.Beam.Schema.Tables
import Test.Tasty
import Test.Tasty.HUnit
import qualified GHC.Generics as GHC
import Data.Record.Beam ()
# ANN type largeRecord #
data TableA (f :: Type -> Type) = TableA {
taPrim :: PrimaryKey TableA f
, taField :: Columnar f Bool
, taMixin :: TableB f
}
deriving (Show, Eq)
deriving anyclass (Beamable)
# ANN type TableB largeRecord #
data TableB (f :: Type -> Type) = TableB {
tbField :: Columnar f Char
}
deriving (Show, Eq)
deriving anyclass (Beamable)
instance Table TableA where
data PrimaryKey TableA f = PrimA (Columnar f Int)
deriving stock (GHC.Generic)
deriving anyclass (Beamable)
primaryKey ta = ta.taPrim
deriving instance Show (Columnar f Int) => Show (PrimaryKey TableA f)
deriving instance Eq (Columnar f Int) => Eq (PrimaryKey TableA f)
tests :: TestTree
tests = testGroup "Test.Record.Beam.Zipping" [
testCase "zipBeamFields" test_zipBeamFields
]
test_zipBeamFields :: Assertion
test_zipBeamFields =
assertEqual "" (runIdentity (zipBeamFieldsM apply fnA argA)) resA
where
apply :: forall a.
Columnar' EndoFn a
-> Columnar' Identity a
-> Identity (Columnar' Identity a)
apply (Columnar' (EndoFn f)) (Columnar' x) = Identity (Columnar' (f x))
fnA :: TableA EndoFn
fnA = TableA {
taPrim = PrimA (EndoFn succ)
, taField = EndoFn not
, taMixin = fnB
}
fnB :: TableB EndoFn
fnB = TableB {
tbField = EndoFn pred
}
argA :: TableA Identity
argA = TableA {
taPrim = PrimA 5
, taField = True
, taMixin = argB
}
argB :: TableB Identity
argB = TableB {
tbField = 'y'
}
resA :: TableA Identity
resA = TableA {
taPrim = PrimA 6
, taField = False
, taMixin = resB
}
resB :: TableB Identity
resB = TableB {
tbField = 'x'
}
newtype EndoFn a = EndoFn (a -> a)
|
28f34a5ac385d19d0a5d01dda07a32c6782348c7a2b8207c263f9d1cfc4213cd | iij/lmq | lmq.erl | -module(lmq).
-include("lmq.hrl").
-export([start/0, stop/0]).
-export([push/2, push/3, pull/1, pull/2, pull/3, ack/2, abort/2, keep/2,
push_all/2, push_all/3, pull_any/1, pull_any/2, pull_any/3, delete/1,
get_props/1, update_props/1, update_props/2,
set_default_props/1, get_default_props/0,
status/0, queue_status/1, stats/0, stats/1]).
-define(DEPS, [lager, crypto, quickrand, uuid, msgpack, msgpack_rpc,
mnesia, ranch, cowlib, cowboy, jsonx, folsom, statsderl, lmq]).
%% ==================================================================
%% Public API
%% ==================================================================
%% Boot LMQ: start every dependency application in declaration order,
%% then lower the console backend's log level to debug.  Returns the
%% result of the log-level call.
start() ->
    lists:foreach(fun ensure_started/1, ?DEPS),
    lager:set_loglevel(lager_console_backend, debug).
%% Shut LMQ down: stop all dependency applications in reverse start
%% order.  Always returns ok.
stop() ->
    lists:foreach(fun application:stop/1, lists:reverse(?DEPS)),
    ok.
%% Push Content onto queue Name with empty metadata.
push(Name, Content) ->
    push(Name, [], Content).
%% Push Content with metadata MD onto queue Name, creating the queue on
%% first use.  A binary name is converted to an atom first.
%% NOTE(review): binary_to_atom on client-supplied names can exhaust the
%% atom table if names are unbounded -- confirm callers constrain them.
push(Name, MD, Content) when is_binary(Name) ->
    push(binary_to_atom(Name, latin1), MD, Content);
push(Name, MD, Content) when is_atom(Name) ->
    Queue = lmq_queue_mgr:get(Name, [create]),
    lmq_queue:push(Queue, {MD, Content}).
%% Blocking pull of one message from queue Name (created on first use).
%% Returns a property list tagged with the queue name.
pull(Name) when is_binary(Name) ->
    pull(binary_to_atom(Name, latin1));
pull(Name) when is_atom(Name) ->
    Queue = lmq_queue_mgr:get(Name, [create]),
    Message = lmq_queue:pull(Queue),
    [{queue, Name} | lmq_lib:export_message(Message)].
%% Pull one message from queue Name, giving up after Timeout; returns
%% 'empty' on timeout, otherwise the exported message tagged with the
%% queue name.
pull(Name, Timeout) when is_binary(Name) ->
    pull(binary_to_atom(Name, latin1), Timeout);
pull(Name, Timeout) when is_atom(Name) ->
    Queue = lmq_queue_mgr:get(Name, [create]),
    case lmq_queue:pull(Queue, Timeout) of
        empty   -> empty;
        Message -> [{queue, Name} | lmq_lib:export_message(Message)]
    end.
%% Pull one message from queue Name while monitoring the process Monitor
%% (presumably the owning client connection -- TODO confirm).  If Monitor
%% dies before a message arrives, the pending async pull is cancelled and
%% any message that raced in is put back on the queue, so it is not lost.
pull(Name, Timeout, Monitor) when is_binary(Name) ->
    pull(binary_to_atom(Name, latin1), Timeout, Monitor);
pull(Name, Timeout, Monitor) when is_atom(Name) ->
    Pid = lmq_queue_mgr:get(Name, [create]),
    Id = lmq_queue:pull_async(Pid, Timeout),
    %% Local receive deadline: 'infinity' and 0 both disable the local
    %% timeout (0 appears to delegate timing entirely to the queue --
    %% TODO confirm); otherwise seconds are converted to milliseconds.
    Wait = case Timeout of
        infinity -> infinity;
        0 -> infinity;
        N -> round(N * 1000)
    end,
    MonitorRef = erlang:monitor(process, Monitor),
    R = receive
        {Id, {error, timeout}} -> empty;
        {Id, Msg} -> [{queue, Name} | lmq_lib:export_message(Msg)];
        {'DOWN', MonitorRef, process, Monitor, _} ->
            %% Monitored process died: cancel the pull, then drain a
            %% possibly already-delivered message and return it.
            lmq_queue:pull_cancel(Pid, Id),
            receive
                {Id, #message{id={_, UUID}}} -> lmq_queue:put_back(Pid, UUID)
            after 0 -> ok
            end,
            {error, down}
    after Wait ->
        empty
    end,
    %% 'flush' discards any late 'DOWN' message left in the mailbox.
    erlang:demonitor(MonitorRef, [flush]),
    R.
%% Acknowledge (complete) message UUID on queue Name.
ack(Name, UUID) ->
    process_message(done, Name, UUID).
%% Abort processing of message UUID: release it back onto queue Name.
abort(Name, UUID) ->
    process_message(release, Name, UUID).
%% Keep message UUID checked out ('retain' appears to extend its lease
%% -- confirm in lmq_queue).
keep(Name, UUID) ->
    process_message(retain, Name, UUID).
%% Push Content with empty metadata onto every queue matching Regexp.
push_all(Regexp, Content) ->
    push_all(Regexp, [], Content).
%% Push {MD, Content} onto every queue whose name matches Regexp.
%% Returns {ok, [{QueueName, PushResult}]} or the matcher's {error, _}.
push_all(Regexp, MD, Content) when is_binary(Regexp) ->
    case lmq_queue_mgr:match(Regexp) of
        {error, Reason} ->
            {error, Reason};
        Queues ->
            Push = fun({Name, Pid}) -> {Name, lmq_queue:push(Pid, {MD, Content})} end,
            {ok, lists:map(Push, Queues)}
    end.
%% Pull one message from any queue matching Regexp, waiting forever.
%% Fix: the timeout was the misspelled atom 'inifinity', which no
%% timeout handling recognises as 'infinity', so the intended
%% wait-forever semantics could never apply.
pull_any(Regexp) ->
    pull_any(Regexp, infinity).
%% Pull one message from any queue matching Regexp within Timeout.
%% Each call starts a fresh multi-queue puller process.
pull_any(Regexp, Timeout) when is_binary(Regexp) ->
    {ok, Puller} = lmq_mpull:start(),
    lmq_mpull:pull(Puller, Regexp, Timeout).
%% Like pull_any/2, but also monitors the process Monitor.  If Monitor
%% dies before a message arrives, the multi-pull is cancelled and any
%% message that raced in is put back on its source queue.
pull_any(Regexp, Timeout, Monitor) when is_binary(Regexp) ->
    {ok, Pid} = lmq_mpull:start(),
    {ok, Ref} = lmq_mpull:pull_async(Pid, Regexp, Timeout),
    MonitorRef = erlang:monitor(process, Monitor),
    receive
        {Ref, Msg} ->
            %% Normal path: got a result; drop the monitor (flush any
            %% late 'DOWN') and return the message.
            erlang:demonitor(MonitorRef, [flush]),
            Msg;
        {'DOWN', MonitorRef, process, Monitor, _} ->
            %% Monitored process died: cancel, then drain a message that
            %% may already have been delivered and return it to its queue.
            lmq_mpull:pull_cancel(Pid),
            receive
                {Ref, [{queue, Name}, {id, UUID}, _, _]} ->
                    Q = lmq_queue_mgr:get(Name, [create]),
                    lmq_queue:put_back(Q, UUID)
            after 0 -> ok
            end,
            {error, down}
    end.
%% Delete the named queue; a binary name is normalised to an atom.
delete(Name) when is_binary(Name) ->
    lmq_queue_mgr:delete(binary_to_atom(Name, latin1));
delete(Name) when is_atom(Name) ->
    lmq_queue_mgr:delete(Name).
%% Fetch the property list of the named queue; a binary name is
%% normalised to an atom.
get_props(Name) when is_binary(Name) ->
    lmq_lib:get_properties(binary_to_atom(Name, latin1));
get_props(Name) when is_atom(Name) ->
    lmq_lib:get_properties(Name).
%% Update the named queue with an empty property list.
update_props(Name) ->
    update_props(Name, []).
%% Create-or-update the named queue with the given properties; a binary
%% name is normalised to an atom.
update_props(Name, Props) when is_binary(Name) ->
    lmq_queue_mgr:get(binary_to_atom(Name, latin1), [create, update, {props, Props}]);
update_props(Name, Props) when is_atom(Name) ->
    lmq_queue_mgr:get(Name, [create, update, {props, Props}]).
%% Set the default properties applied to newly created queues.
set_default_props(Props) ->
    lmq_queue_mgr:set_default_props(Props).
%% Fetch the default properties applied to newly created queues.
get_default_props() ->
    lmq_queue_mgr:get_default_props().
%% Cluster-wide status report: sorted running/known mnesia nodes plus a
%% per-queue status proplist for every known queue.
status() ->
    Running = lists:sort(mnesia:system_info(running_db_nodes)),
    Known = lists:sort(mnesia:system_info(db_nodes)),
    Names = lists:sort(lmq_lib:all_queue_names()),
    [{active_nodes, Running},
     {all_nodes, Known},
     {queues, [{Name, queue_status(Name)} || Name <- Names]}].
%% Status of one queue: row count, approximate memory use in bytes
%% (mnesia reports words, so scale by the VM word size), replica nodes,
%% and queue properties.
queue_status(Name) ->
    WordSize = erlang:system_info(wordsize),
    [{size, mnesia:table_info(Name, size)},
     {memory, WordSize * mnesia:table_info(Name, memory)},
     {nodes, mnesia:table_info(Name, where_to_write)},
     {props, lmq_lib:get_properties(Name)}].
%% Metrics for every known queue, sorted by queue name.
stats() ->
    lists:map(fun stats/1, lists:sort(lmq_lib:all_queue_names())).
%% Metrics for one queue: push, pull and retention readings from
%% lmq_metrics, tagged with the queue name.
stats(Name) when is_atom(Name) ->
    Metric = fun(Kind) -> lmq_metrics:get_metric(Name, Kind) end,
    {Name, [{push, Metric(push)},
            {pull, Metric(pull)},
            {retention, Metric(retention)}]}.
%% ==================================================================
%% Private functions
%% ==================================================================
%% Start App, treating "already started" as success.  Any other start
%% error deliberately crashes the caller (case_clause) -- let it crash.
ensure_started(App) ->
    case application:start(App) of
        {error, {already_started, App}} -> ok;
        ok -> ok
    end.
%% Apply queue operation Fun (done | release | retain) to message UUID
%% on queue Name.  Fun is dispatched dynamically as lmq_queue:Fun/2.
%% Returns ok, {error, queue_not_found} when the queue does not exist,
%% or {error, not_found} when the message is gone or the UUID string is
%% malformed (uuid:string_to_uuid exits with badarg in that case, which
%% the try/of guards against without catching errors from lmq_queue).
process_message(Fun, Name, UUID) when is_atom(Fun), is_binary(Name) ->
    process_message(Fun, binary_to_atom(Name, latin1), UUID);
process_message(Fun, Name, UUID) when is_atom(Fun), is_atom(Name) ->
    case lmq_queue_mgr:get(Name) of
        not_found ->
            {error, queue_not_found};
        Pid ->
            try parse_uuid(UUID) of
                MsgId ->
                    case lmq_queue:Fun(Pid, MsgId) of
                        ok -> ok;
                        not_found -> {error, not_found}
                    end
            catch exit:badarg ->
                {error, not_found}
            end
    end.
parse_uuid(UUID) when is_binary(UUID) ->
parse_uuid(binary_to_list(UUID));
parse_uuid(UUID) when is_list(UUID) ->
uuid:string_to_uuid(UUID);
parse_uuid(UUID) ->
UUID.
| null | https://raw.githubusercontent.com/iij/lmq/3f01c555af973a07a3f2b22ff95a2bc1c7930bc2/src/lmq.erl | erlang | ==================================================================
Public API
==================================================================
==================================================================
Private functions
================================================================== | -module(lmq).
-include("lmq.hrl").
-export([start/0, stop/0]).
-export([push/2, push/3, pull/1, pull/2, pull/3, ack/2, abort/2, keep/2,
push_all/2, push_all/3, pull_any/1, pull_any/2, pull_any/3, delete/1,
get_props/1, update_props/1, update_props/2,
set_default_props/1, get_default_props/0,
status/0, queue_status/1, stats/0, stats/1]).
-define(DEPS, [lager, crypto, quickrand, uuid, msgpack, msgpack_rpc,
mnesia, ranch, cowlib, cowboy, jsonx, folsom, statsderl, lmq]).
start() ->
[ensure_started(Dep) || Dep <- ?DEPS],
lager:set_loglevel(lager_console_backend, debug).
stop() ->
[application:stop(Dep) || Dep <- lists:reverse(?DEPS)],
ok.
push(Name, Content) ->
push(Name, [], Content).
push(Name, MD, Content) when is_binary(Name) ->
push(binary_to_atom(Name, latin1), MD, Content);
push(Name, MD, Content) when is_atom(Name) ->
Pid = lmq_queue_mgr:get(Name, [create]),
lmq_queue:push(Pid, {MD, Content}).
pull(Name) when is_binary(Name) ->
pull(binary_to_atom(Name, latin1));
pull(Name) when is_atom(Name) ->
Pid = lmq_queue_mgr:get(Name, [create]),
Msg = lmq_queue:pull(Pid),
[{queue, Name} | lmq_lib:export_message(Msg)].
pull(Name, Timeout) when is_binary(Name) ->
pull(binary_to_atom(Name, latin1), Timeout);
pull(Name, Timeout) when is_atom(Name) ->
Pid = lmq_queue_mgr:get(Name, [create]),
case lmq_queue:pull(Pid, Timeout) of
empty -> empty;
Msg -> [{queue, Name} | lmq_lib:export_message(Msg)]
end.
pull(Name, Timeout, Monitor) when is_binary(Name) ->
pull(binary_to_atom(Name, latin1), Timeout, Monitor);
pull(Name, Timeout, Monitor) when is_atom(Name) ->
Pid = lmq_queue_mgr:get(Name, [create]),
Id = lmq_queue:pull_async(Pid, Timeout),
Wait = case Timeout of
infinity -> infinity;
0 -> infinity;
N -> round(N * 1000)
end,
MonitorRef = erlang:monitor(process, Monitor),
R = receive
{Id, {error, timeout}} -> empty;
{Id, Msg} -> [{queue, Name} | lmq_lib:export_message(Msg)];
{'DOWN', MonitorRef, process, Monitor, _} ->
lmq_queue:pull_cancel(Pid, Id),
receive
{Id, #message{id={_, UUID}}} -> lmq_queue:put_back(Pid, UUID)
after 0 -> ok
end,
{error, down}
after Wait ->
empty
end,
erlang:demonitor(MonitorRef, [flush]),
R.
ack(Name, UUID) ->
process_message(done, Name, UUID).
abort(Name, UUID) ->
process_message(release, Name, UUID).
keep(Name, UUID) ->
process_message(retain, Name, UUID).
push_all(Regexp, Content) ->
push_all(Regexp, [], Content).
push_all(Regexp, MD, Content) when is_binary(Regexp) ->
case lmq_queue_mgr:match(Regexp) of
{error, _}=R ->
R;
Queues ->
{ok, [{Name, lmq_queue:push(Pid, {MD, Content})} || {Name, Pid} <- Queues]}
end.
pull_any(Regexp) ->
pull_any(Regexp, inifinity).
pull_any(Regexp, Timeout) when is_binary(Regexp) ->
{ok, Pid} = lmq_mpull:start(),
lmq_mpull:pull(Pid, Regexp, Timeout).
pull_any(Regexp, Timeout, Monitor) when is_binary(Regexp) ->
{ok, Pid} = lmq_mpull:start(),
{ok, Ref} = lmq_mpull:pull_async(Pid, Regexp, Timeout),
MonitorRef = erlang:monitor(process, Monitor),
receive
{Ref, Msg} ->
erlang:demonitor(MonitorRef, [flush]),
Msg;
{'DOWN', MonitorRef, process, Monitor, _} ->
lmq_mpull:pull_cancel(Pid),
receive
{Ref, [{queue, Name}, {id, UUID}, _, _]} ->
Q = lmq_queue_mgr:get(Name, [create]),
lmq_queue:put_back(Q, UUID)
after 0 -> ok
end,
{error, down}
end.
delete(Name) when is_binary(Name) ->
delete(binary_to_atom(Name, latin1));
delete(Name) when is_atom(Name) ->
lmq_queue_mgr:delete(Name).
get_props(Name) when is_binary(Name) ->
get_props(binary_to_atom(Name, latin1));
get_props(Name) when is_atom(Name) ->
lmq_lib:get_properties(Name).
update_props(Name) ->
update_props(Name, []).
update_props(Name, Props) when is_binary(Name) ->
update_props(binary_to_atom(Name, latin1), Props);
update_props(Name, Props) when is_atom(Name) ->
lmq_queue_mgr:get(Name, [create, update, {props, Props}]).
set_default_props(Props) ->
lmq_queue_mgr:set_default_props(Props).
get_default_props() ->
lmq_queue_mgr:get_default_props().
status() ->
[{active_nodes, lists:sort(mnesia:system_info(running_db_nodes))},
{all_nodes, lists:sort(mnesia:system_info(db_nodes))},
{queues, [{N, queue_status(N)} || N <- lists:sort(lmq_lib:all_queue_names())]}
].
queue_status(Name) ->
[{size, mnesia:table_info(Name, size)},
{memory, mnesia:table_info(Name, memory) * erlang:system_info(wordsize)},
{nodes, mnesia:table_info(Name, where_to_write)},
{props, lmq_lib:get_properties(Name)}
].
stats() ->
[stats(N) || N <- lists:sort(lmq_lib:all_queue_names())].
stats(Name) when is_atom(Name) ->
{Name, [{push, lmq_metrics:get_metric(Name, push)},
{pull, lmq_metrics:get_metric(Name, pull)},
{retention, lmq_metrics:get_metric(Name, retention)}
]}.
ensure_started(App) ->
case application:start(App) of
ok -> ok;
{error, {already_started, App}} -> ok
end.
process_message(Fun, Name, UUID) when is_atom(Fun), is_binary(Name) ->
process_message(Fun, binary_to_atom(Name, latin1), UUID);
process_message(Fun, Name, UUID) when is_atom(Fun), is_atom(Name) ->
case lmq_queue_mgr:get(Name) of
not_found ->
{error, queue_not_found};
Pid ->
try parse_uuid(UUID) of
MsgId ->
case lmq_queue:Fun(Pid, MsgId) of
ok -> ok;
not_found -> {error, not_found}
end
catch exit:badarg ->
{error, not_found}
end
end.
parse_uuid(UUID) when is_binary(UUID) ->
parse_uuid(binary_to_list(UUID));
parse_uuid(UUID) when is_list(UUID) ->
uuid:string_to_uuid(UUID);
parse_uuid(UUID) ->
UUID.
|
4fc41260a0606d6e3f85dc72921ba9eb466c6f14a7c4b5761e3b8900d9ea8da3 | huangz1990/real-world-haskell-cn | FunctorLaws.hs | -- file: ch10/FunctorLaws.hs
fmap id == id
fmap (f . g) == fmap f . fmap g | null | https://raw.githubusercontent.com/huangz1990/real-world-haskell-cn/f67b07dd846b1950d17ff941d650089fcbbe9586/code/ch10/FunctorLaws.hs | haskell | file: ch10/FunctorLaws.hs | fmap id == id
fmap (f . g) == fmap f . fmap g |
fbe71a656fd3f001df4c9677e3bca0b59b653d1c11eb920c4058e44896eca85c | fluentpython/lispy | chap7c.scm | $ I d : , v 4.3 2006/11/24 18:16:37 queinnec Exp $
;;;(((((((((((((((((((((((((((((((( L i S P ))))))))))))))))))))))))))))))))
;;; This file is part of the files that accompany the book:
LISP Implantation Semantique Programmation ( InterEditions , France )
By Christian Queinnec < >
;;; Newest version may be retrieved from:
( IP 128.93.2.54 ) ftp.inria.fr : INRIA / Projects / icsla / Books / LiSP*.tar.gz
;;; Check the README file before using this file.
;;;(((((((((((((((((((((((((((((((( L i S P ))))))))))))))))))))))))))))))))
;;; Refinement of chap6d and chap7b. This interpreter introduces a
;;; *val* register and a *stack* to save/restore arguments that wait
;;; to be stored in an activation block. Functions now take their
;;; activation frame in the *val* register. Code is now a list of combinators.
;;; Load chap6d before.
(define *val* #f)
(define *fun* #f)
(define *arg1* #f)
(define *arg2* #f)
(define *pc* '())
(define *stack* (make-vector 1000))
(define *stack-index* 0)
(define (stack-push v)
(vector-set! *stack* *stack-index* v)
(set! *stack-index* (+ *stack-index* 1)) )
(define (stack-pop)
(set! *stack-index* (- *stack-index* 1))
(vector-ref *stack* *stack-index*) )
(define (save-stack)
(let ((copy (make-vector *stack-index*)))
(vector-copy! *stack* copy 0 *stack-index*)
copy ) )
(define (restore-stack copy)
(set! *stack-index* (vector-length copy))
(vector-copy! copy *stack* 0 *stack-index*) )
;;; Copy vector old[start..end[ into vector new[start..end[
(define (vector-copy! old new start end)
(let copy ((i start))
(when (< i end)
(vector-set! new i (vector-ref old i))
(copy (+ i 1)) ) ) )
(define-class primitive Object
( address ) )
(define-class continuation Object
( stack
) )
;;; Combinators
(define (SHALLOW-ARGUMENT-REF j)
(list (lambda () (set! *val* (activation-frame-argument *env* j)))) )
(define (PREDEFINED i)
(list (lambda () (set! *val* (predefined-fetch i)))) )
(define (DEEP-ARGUMENT-REF i j)
(list (lambda () (set! *val* (deep-fetch *env* i j)))) )
(define (SHALLOW-ARGUMENT-SET! j m)
(append m (SET-SHALLOW-ARGUMENT! j)) )
(define (SET-SHALLOW-ARGUMENT! j)
(list (lambda () (set-activation-frame-argument! *env* j *val*))) )
(define (DEEP-ARGUMENT-SET! i j m)
(append m (SET-DEEP-ARGUMENT! i j)) )
(define (SET-DEEP-ARGUMENT! i j)
(list (lambda () (deep-update! *env* i j *val*))) )
(define (GLOBAL-REF i)
(list (lambda () (set! *val* (global-fetch i)))) )
(define (CHECKED-GLOBAL-REF i)
(list (lambda () (set! *val* (global-fetch i))
(when (eq? *val* undefined-value)
(wrong "Uninitialized variable") ))) )
(define (GLOBAL-SET! i m)
(append m (SET-GLOBAL! i)) )
(define (SET-GLOBAL! i)
(list (lambda () (global-update! i *val*))) )
(define (CONSTANT value)
(list (lambda () (set! *val* value))) )
(define (ALTERNATIVE m1 m2 m3)
(append m1 (JUMP-FALSE (+ 1 (length m2)))
m2 (GOTO (length m3))
m3 ) )
(define (JUMP-FALSE i)
(list (lambda () (if (not *val*) (set! *pc* (list-tail *pc* i))))) )
(define (GOTO i)
(list (lambda () (set! *pc* (list-tail *pc* i)))) )
(define (SEQUENCE m m+)
(append m m+) )
(define (TR-FIX-LET m* m+)
(append m* (EXTEND-ENV) m+) )
(define (EXTEND-ENV)
(list (lambda () (set! *env* (sr-extend* *env* *val*)))) )
(define (FIX-LET m* m+)
(append m* (EXTEND-ENV) m+ (UNLINK-ENV)) )
(define (UNLINK-ENV)
(list (lambda () (set! *env* (activation-frame-next *env*)))) )
(define (CALL0 address)
(list (lambda () (set! *val* (address)))) )
(define (CALL1 address m1)
(append m1 (INVOKE1 address) ) )
(define (INVOKE1 address)
(list (lambda () (set! *val* (address *val*)))) )
(define (CALL2 address m1 m2)
(append m1 (PUSH-VALUE) m2 (POP-ARG1) (INVOKE2 address)) )
(define (PUSH-VALUE)
(list (lambda () (stack-push *val*))) )
(define (POP-ARG1)
(list (lambda () (set! *arg1* (stack-pop)))) )
(define (INVOKE2 address)
(list (lambda () (set! *val* (address *arg1* *val*)))) )
(define (CALL3 address m1 m2 m3)
(append m1 (PUSH-VALUE)
m2 (PUSH-VALUE)
m3 (POP-ARG2) (POP-ARG1) (INVOKE3 address) ) )
(define (POP-ARG2)
(list (lambda () (set! *arg2* (stack-pop)))) )
(define (INVOKE3 address)
(list (lambda () (set! *val* (address *arg1* *arg2* *val*)))) )
(define (FIX-CLOSURE m+ arity)
(define the-function
(append (ARITY=? (+ arity 1)) (EXTEND-ENV) m+ (RETURN)) )
(append (CREATE-CLOSURE 1) (GOTO (length the-function))
the-function ) )
(define (CREATE-CLOSURE offset)
(list (lambda () (set! *val* (make-closure (list-tail *pc* offset)
*env* )))) )
(define (ARITY=? arity+1)
(list (lambda ()
(unless (= (activation-frame-argument-length *val*) arity+1)
(wrong "Incorrect arity") ) )) )
(define (NARY-CLOSURE m+ arity)
(define the-function
(append (ARITY>=? (+ arity 1)) (PACK-FRAME! arity) (EXTEND-ENV)
m+ (RETURN) ) )
(append (CREATE-CLOSURE 1) (GOTO (length the-function))
the-function ) )
(define (RETURN)
(list (lambda () (set! *pc* (stack-pop)))) )
(define (PACK-FRAME! arity)
(list (lambda () (listify! *val* arity))) )
(define (ARITY>=? arity+1)
(list (lambda ()
(unless (>= (activation-frame-argument-length *val*) arity+1)
(wrong "Incorrect arity") ) )) )
(define (TR-REGULAR-CALL m m*)
(append m (PUSH-VALUE) m* (POP-FUNCTION) (FUNCTION-INVOKE)) )
(define (POP-FUNCTION)
(list (lambda () (set! *fun* (stack-pop)))) )
(define (FUNCTION-INVOKE)
(list (lambda () (invoke *fun*))) )
(define (REGULAR-CALL m m*)
(append m (PUSH-VALUE)
m* (POP-FUNCTION) (PRESERVE-ENV)
(FUNCTION-INVOKE) (RESTORE-ENV)
) )
(define (PRESERVE-ENV)
(list (lambda () (stack-push *env*))) )
(define (RESTORE-ENV)
(list (lambda () (set! *env* (stack-pop)))) )
(define (STORE-ARGUMENT m m* rank)
(append m (PUSH-VALUE) m* (POP-FRAME! rank)) )
(define (POP-FRAME! rank)
(list (lambda () (set-activation-frame-argument! *val* rank (stack-pop)))) )
(define (CONS-ARGUMENT m m* arity)
(append m (PUSH-VALUE) m* (POP-CONS-FRAME! arity)) )
(define (POP-CONS-FRAME! arity)
(list (lambda ()
(set-activation-frame-argument!
*val* arity (cons (stack-pop)
(activation-frame-argument *val* arity) ) ) )) )
(define (ALLOCATE-FRAME size)
(let ((size+1 (+ size 1)))
(list (lambda () (set! *val* (allocate-activation-frame size+1)))) ) )
(define (ALLOCATE-DOTTED-FRAME arity)
(let ((arity+1 (+ arity 1)))
(list (lambda ()
(let ((v* (allocate-activation-frame arity+1)))
(set-activation-frame-argument! v* arity '())
(set! *val* v*) ) )) ) )
(define (FINISH)
(list (lambda () (*exit* *val*))) )
(define (invoke f)
(cond ((closure? f)
(stack-push *pc*)
(set! *env* (closure-closed-environment f))
(set! *pc* (closure-code f)) )
((primitive? f)
((primitive-address f)) )
((continuation? f)
(if (= (+ 1 1) (activation-frame-argument-length *val*))
(begin
(restore-stack (continuation-stack f))
(set! *val* (activation-frame-argument *val* 0))
(set! *pc* (stack-pop)) )
(wrong "Incorrect arity" 'continuation) ) )
(else (wrong "Not a function" f)) ) )
(define-syntax defprimitive1
(syntax-rules ()
((defprimitive1 name value)
(definitial name
(letrec ((arity+1 (+ 1 1))
(behavior
(lambda ()
(if (= arity+1 (activation-frame-argument-length *val*))
(set! *val* (value (activation-frame-argument *val* 0)))
(wrong "Incorrect arity" 'name) ) ) ) )
(description-extend! 'name `(function ,value a))
(make-primitive behavior) ) ) ) ) )
(define-syntax defprimitive2
(syntax-rules ()
((defprimitive2 name value)
(definitial name
(letrec ((arity+1 (+ 2 1))
(behavior
(lambda ()
(show-registers 'name) ;; debug
(if (= arity+1 (activation-frame-argument-length *val*))
(set! *val*
(value (activation-frame-argument *val* 0)
(activation-frame-argument *val* 1) ) )
(wrong "Incorrect arity" 'name) ) ) ) )
(description-extend! 'name `(function ,value a b))
(make-primitive behavior) ) ) ) ) )
(defprimitive cons cons 2)
(defprimitive car car 1)
(defprimitive cdr cdr 1)
(defprimitive pair? pair? 1)
(defprimitive symbol? symbol? 1)
(defprimitive eq? eq? 2)
(defprimitive set-car! set-car! 2)
(defprimitive set-cdr! set-cdr! 2)
(defprimitive + + 2)
(defprimitive - - 2)
(defprimitive = = 2)
(defprimitive < < 2)
(defprimitive > > 2)
(defprimitive * * 2)
(defprimitive <= <= 2)
(defprimitive >= >= 2)
(defprimitive remainder remainder 2)
(defprimitive display display 1)
(definitial call/cc
(let* ((arity 1)
(arity+1 (+ arity 1)) )
(make-primitive
(lambda ()
(if (= arity+1 (activation-frame-argument-length *val*))
(let ((f (activation-frame-argument *val* 0))
(frame (allocate-activation-frame (+ 1 1))))
(stack-push *pc*)
(set-activation-frame-argument!
frame 0 (make-continuation (save-stack)) )
(stack-pop)
(set! *val* frame)
(invoke f) )
(wrong "Incorrect arity" 'call/cc) ) ) ) ) )
(definitial apply
(let* ((arity 2)
(arity+1 (+ arity 1)) )
(make-primitive
(lambda ()
(if (>= (activation-frame-argument-length *val*) arity+1)
(let* ((proc (activation-frame-argument *val* 0))
(last-arg-index (- (activation-frame-argument-length *val*) 2))
(last-arg (activation-frame-argument *val* last-arg-index))
(size (+ last-arg-index (length last-arg)))
(frame (allocate-activation-frame size)) )
(do ((i 1 (+ i 1)))
((= i last-arg-index))
(set-activation-frame-argument!
frame (- i 1) (activation-frame-argument *val* i) ) )
(do ((i (- last-arg-index 1) (+ i 1))
(last-arg last-arg (cdr last-arg)) )
((null? last-arg))
(set-activation-frame-argument! frame i (car last-arg)) )
(set! *val* frame)
(invoke proc) )
(wrong "Incorrect arity" 'apply) ) ) ) ) )
(definitial list
(make-primitive
(lambda ()
(let ((args-number (- (activation-frame-argument-length *val*) 1))
(result '()) )
(do ((i args-number (- i 1)))
((= i 0))
(set! result (cons (activation-frame-argument *val* (- i 1))
result )) )
(set! *val* result) ) ) ) )
(define *debug* #f)
(define (show-registers message)
(when *debug* (format #t "
----------------~A
PC = -~A
ENV = ~A
VAL = ~A
FUN = ~A
STACK = ~A~%" message (length *pc*)
*env* *val* *fun* (save-stack) ) )
)
(define (run)
(let ((instruction (car *pc*)))
(set! *pc* (cdr *pc*))
(instruction)
(run) ) )
(define install-regular-combinators
(let ((originals (map eval combinator-names)))
(lambda ()
(for-each (lambda (old-value name)
(eval `(set! ,name ',old-value)) )
originals
combinator-names ) ) ) )
(define (install-disassembling-combinators)
(for-each (lambda (name)
(eval `(set! ,name (lambda args (,name . ,args)))) )
combinator-names ) )
(define combinator-names
'( SHALLOW-ARGUMENT-REF
PREDEFINED
DEEP-ARGUMENT-REF
SET-SHALLOW-ARGUMENT
SET-DEEP-ARGUMENT!
GLOBAL-REF
CHECKED-GLOBAL-REF
SET-GLOBAL!
CONSTANT
JUMP-FALSE
GOTO
EXTEND-ENV
UNLINK-ENV
CALL0
INVOKE1
PUSH-VALUE
POP-ARG1
INVOKE2
POP-ARG2
INVOKE3
CREATE-CLOSURE
ARITY=?
RETURN
PACK-FRAME!
ARITY>=?
POP-FUNCTION
FUNCTION-INVOKE
PRESERVE-ENV
RESTORE-ENV
POP-FRAME!
POP-CONS-FRAME!
ALLOCATE-FRAME
ALLOCATE-DOTTED-FRAME
FINISH
) )
(define (chapter7c-interpreter)
(define (toplevel)
(define e (read))
(set! *env* sr.init)
(set! *val* #f)
(set! *fun* #f)
(set! *arg1* #f)
(set! *arg2* #f)
(set! *stack-index* 0)
(set! *pc* (append (meaning e r.init #t)
(FINISH) ))
(when *debug* (disassemble e) (display *pc*) (newline)) ;; DEBUG
(call/cc (lambda (exit)
(set! *exit* exit)
(run) ))
(display *val*)
(toplevel) )
(toplevel) )
(define (stand-alone-producer7c e)
(set! g.current (original.g.current))
(let* ((m (meaning e r.init #t))
(size (length g.current))
(global-names (map car (reverse g.current))) )
(when *debug* (disassemble e)) ;; DEBUG
(lambda ()
(set! sg.current (make-vector size undefined-value))
(set! sg.current.names global-names)
(set! *env* sr.init)
(set! *val* #f)
(set! *fun* #f)
(set! *arg1* #f)
(set! *arg2* #f)
(set! *stack-index* 0)
(set! *pc* (append m (FINISH)))
;;(display m)(newline) ;; debug
(call/cc (lambda (exit)
(set! *exit* exit)
(run) )) ) ) )
(define (test-scheme7c file)
(suite-test
file
"Scheme? "
"Scheme= "
#t
(lambda (read check error)
(set! wrong error)
(set! static-wrong error)
(lambda ()
((stand-alone-producer7c (read)))
(check *val*) ) )
equal? ) )
;;; Missing definitions
(define *exit* #f)
;;; end of chap7c.scm
| null | https://raw.githubusercontent.com/fluentpython/lispy/6b995c398e2d100fc3fc292e34ba1a00c0ae9b5a/references/LiSP-2ndEdition-2006Dec11/src/chap7c.scm | scheme | (((((((((((((((((((((((((((((((( L i S P ))))))))))))))))))))))))))))))))
This file is part of the files that accompany the book:
Newest version may be retrieved from:
Check the README file before using this file.
(((((((((((((((((((((((((((((((( L i S P ))))))))))))))))))))))))))))))))
Refinement of chap6d and chap7b. This interpreter introduces a
*val* register and a *stack* to save/restore arguments that wait
to be stored in an activation block. Functions now take their
activation frame in the *val* register. Code is now a list of combinators.
Load chap6d before.
Copy vector old[start..end[ into vector new[start..end[
Combinators
debug
DEBUG
DEBUG
(display m)(newline) ;; debug
Missing definitions
end of chap7c.scm | $ I d : , v 4.3 2006/11/24 18:16:37 queinnec Exp $
LISP Implantation Semantique Programmation ( InterEditions , France )
By Christian Queinnec < >
( IP 128.93.2.54 ) ftp.inria.fr : INRIA / Projects / icsla / Books / LiSP*.tar.gz
(define *val* #f)
(define *fun* #f)
(define *arg1* #f)
(define *arg2* #f)
(define *pc* '())
(define *stack* (make-vector 1000))
(define *stack-index* 0)
(define (stack-push v)
(vector-set! *stack* *stack-index* v)
(set! *stack-index* (+ *stack-index* 1)) )
(define (stack-pop)
(set! *stack-index* (- *stack-index* 1))
(vector-ref *stack* *stack-index*) )
(define (save-stack)
(let ((copy (make-vector *stack-index*)))
(vector-copy! *stack* copy 0 *stack-index*)
copy ) )
(define (restore-stack copy)
(set! *stack-index* (vector-length copy))
(vector-copy! copy *stack* 0 *stack-index*) )
(define (vector-copy! old new start end)
(let copy ((i start))
(when (< i end)
(vector-set! new i (vector-ref old i))
(copy (+ i 1)) ) ) )
(define-class primitive Object
( address ) )
(define-class continuation Object
( stack
) )
(define (SHALLOW-ARGUMENT-REF j)
(list (lambda () (set! *val* (activation-frame-argument *env* j)))) )
(define (PREDEFINED i)
(list (lambda () (set! *val* (predefined-fetch i)))) )
(define (DEEP-ARGUMENT-REF i j)
(list (lambda () (set! *val* (deep-fetch *env* i j)))) )
(define (SHALLOW-ARGUMENT-SET! j m)
(append m (SET-SHALLOW-ARGUMENT! j)) )
(define (SET-SHALLOW-ARGUMENT! j)
(list (lambda () (set-activation-frame-argument! *env* j *val*))) )
(define (DEEP-ARGUMENT-SET! i j m)
(append m (SET-DEEP-ARGUMENT! i j)) )
(define (SET-DEEP-ARGUMENT! i j)
(list (lambda () (deep-update! *env* i j *val*))) )
(define (GLOBAL-REF i)
(list (lambda () (set! *val* (global-fetch i)))) )
(define (CHECKED-GLOBAL-REF i)
(list (lambda () (set! *val* (global-fetch i))
(when (eq? *val* undefined-value)
(wrong "Uninitialized variable") ))) )
(define (GLOBAL-SET! i m)
(append m (SET-GLOBAL! i)) )
(define (SET-GLOBAL! i)
(list (lambda () (global-update! i *val*))) )
(define (CONSTANT value)
(list (lambda () (set! *val* value))) )
(define (ALTERNATIVE m1 m2 m3)
(append m1 (JUMP-FALSE (+ 1 (length m2)))
m2 (GOTO (length m3))
m3 ) )
(define (JUMP-FALSE i)
(list (lambda () (if (not *val*) (set! *pc* (list-tail *pc* i))))) )
(define (GOTO i)
(list (lambda () (set! *pc* (list-tail *pc* i)))) )
(define (SEQUENCE m m+)
(append m m+) )
(define (TR-FIX-LET m* m+)
(append m* (EXTEND-ENV) m+) )
(define (EXTEND-ENV)
(list (lambda () (set! *env* (sr-extend* *env* *val*)))) )
(define (FIX-LET m* m+)
(append m* (EXTEND-ENV) m+ (UNLINK-ENV)) )
(define (UNLINK-ENV)
(list (lambda () (set! *env* (activation-frame-next *env*)))) )
(define (CALL0 address)
(list (lambda () (set! *val* (address)))) )
(define (CALL1 address m1)
(append m1 (INVOKE1 address) ) )
(define (INVOKE1 address)
(list (lambda () (set! *val* (address *val*)))) )
(define (CALL2 address m1 m2)
(append m1 (PUSH-VALUE) m2 (POP-ARG1) (INVOKE2 address)) )
(define (PUSH-VALUE)
(list (lambda () (stack-push *val*))) )
(define (POP-ARG1)
(list (lambda () (set! *arg1* (stack-pop)))) )
(define (INVOKE2 address)
(list (lambda () (set! *val* (address *arg1* *val*)))) )
(define (CALL3 address m1 m2 m3)
(append m1 (PUSH-VALUE)
m2 (PUSH-VALUE)
m3 (POP-ARG2) (POP-ARG1) (INVOKE3 address) ) )
(define (POP-ARG2)
(list (lambda () (set! *arg2* (stack-pop)))) )
(define (INVOKE3 address)
(list (lambda () (set! *val* (address *arg1* *arg2* *val*)))) )
(define (FIX-CLOSURE m+ arity)
(define the-function
(append (ARITY=? (+ arity 1)) (EXTEND-ENV) m+ (RETURN)) )
(append (CREATE-CLOSURE 1) (GOTO (length the-function))
the-function ) )
(define (CREATE-CLOSURE offset)
(list (lambda () (set! *val* (make-closure (list-tail *pc* offset)
*env* )))) )
(define (ARITY=? arity+1)
(list (lambda ()
(unless (= (activation-frame-argument-length *val*) arity+1)
(wrong "Incorrect arity") ) )) )
(define (NARY-CLOSURE m+ arity)
(define the-function
(append (ARITY>=? (+ arity 1)) (PACK-FRAME! arity) (EXTEND-ENV)
m+ (RETURN) ) )
(append (CREATE-CLOSURE 1) (GOTO (length the-function))
the-function ) )
(define (RETURN)
(list (lambda () (set! *pc* (stack-pop)))) )
(define (PACK-FRAME! arity)
(list (lambda () (listify! *val* arity))) )
(define (ARITY>=? arity+1)
(list (lambda ()
(unless (>= (activation-frame-argument-length *val*) arity+1)
(wrong "Incorrect arity") ) )) )
(define (TR-REGULAR-CALL m m*)
(append m (PUSH-VALUE) m* (POP-FUNCTION) (FUNCTION-INVOKE)) )
(define (POP-FUNCTION)
(list (lambda () (set! *fun* (stack-pop)))) )
(define (FUNCTION-INVOKE)
(list (lambda () (invoke *fun*))) )
(define (REGULAR-CALL m m*)
(append m (PUSH-VALUE)
m* (POP-FUNCTION) (PRESERVE-ENV)
(FUNCTION-INVOKE) (RESTORE-ENV)
) )
(define (PRESERVE-ENV)
(list (lambda () (stack-push *env*))) )
(define (RESTORE-ENV)
(list (lambda () (set! *env* (stack-pop)))) )
(define (STORE-ARGUMENT m m* rank)
(append m (PUSH-VALUE) m* (POP-FRAME! rank)) )
(define (POP-FRAME! rank)
(list (lambda () (set-activation-frame-argument! *val* rank (stack-pop)))) )
(define (CONS-ARGUMENT m m* arity)
(append m (PUSH-VALUE) m* (POP-CONS-FRAME! arity)) )
(define (POP-CONS-FRAME! arity)
(list (lambda ()
(set-activation-frame-argument!
*val* arity (cons (stack-pop)
(activation-frame-argument *val* arity) ) ) )) )
(define (ALLOCATE-FRAME size)
(let ((size+1 (+ size 1)))
(list (lambda () (set! *val* (allocate-activation-frame size+1)))) ) )
(define (ALLOCATE-DOTTED-FRAME arity)
(let ((arity+1 (+ arity 1)))
(list (lambda ()
(let ((v* (allocate-activation-frame arity+1)))
(set-activation-frame-argument! v* arity '())
(set! *val* v*) ) )) ) )
(define (FINISH)
(list (lambda () (*exit* *val*))) )
(define (invoke f)
(cond ((closure? f)
(stack-push *pc*)
(set! *env* (closure-closed-environment f))
(set! *pc* (closure-code f)) )
((primitive? f)
((primitive-address f)) )
((continuation? f)
(if (= (+ 1 1) (activation-frame-argument-length *val*))
(begin
(restore-stack (continuation-stack f))
(set! *val* (activation-frame-argument *val* 0))
(set! *pc* (stack-pop)) )
(wrong "Incorrect arity" 'continuation) ) )
(else (wrong "Not a function" f)) ) )
(define-syntax defprimitive1
(syntax-rules ()
((defprimitive1 name value)
(definitial name
(letrec ((arity+1 (+ 1 1))
(behavior
(lambda ()
(if (= arity+1 (activation-frame-argument-length *val*))
(set! *val* (value (activation-frame-argument *val* 0)))
(wrong "Incorrect arity" 'name) ) ) ) )
(description-extend! 'name `(function ,value a))
(make-primitive behavior) ) ) ) ) )
(define-syntax defprimitive2
(syntax-rules ()
((defprimitive2 name value)
(definitial name
(letrec ((arity+1 (+ 2 1))
(behavior
(lambda ()
(if (= arity+1 (activation-frame-argument-length *val*))
(set! *val*
(value (activation-frame-argument *val* 0)
(activation-frame-argument *val* 1) ) )
(wrong "Incorrect arity" 'name) ) ) ) )
(description-extend! 'name `(function ,value a b))
(make-primitive behavior) ) ) ) ) )
(defprimitive cons cons 2)
(defprimitive car car 1)
(defprimitive cdr cdr 1)
(defprimitive pair? pair? 1)
(defprimitive symbol? symbol? 1)
(defprimitive eq? eq? 2)
(defprimitive set-car! set-car! 2)
(defprimitive set-cdr! set-cdr! 2)
(defprimitive + + 2)
(defprimitive - - 2)
(defprimitive = = 2)
(defprimitive < < 2)
(defprimitive > > 2)
(defprimitive * * 2)
(defprimitive <= <= 2)
(defprimitive >= >= 2)
(defprimitive remainder remainder 2)
(defprimitive display display 1)
(definitial call/cc
(let* ((arity 1)
(arity+1 (+ arity 1)) )
(make-primitive
(lambda ()
(if (= arity+1 (activation-frame-argument-length *val*))
(let ((f (activation-frame-argument *val* 0))
(frame (allocate-activation-frame (+ 1 1))))
(stack-push *pc*)
(set-activation-frame-argument!
frame 0 (make-continuation (save-stack)) )
(stack-pop)
(set! *val* frame)
(invoke f) )
(wrong "Incorrect arity" 'call/cc) ) ) ) ) )
(definitial apply
(let* ((arity 2)
(arity+1 (+ arity 1)) )
(make-primitive
(lambda ()
(if (>= (activation-frame-argument-length *val*) arity+1)
(let* ((proc (activation-frame-argument *val* 0))
(last-arg-index (- (activation-frame-argument-length *val*) 2))
(last-arg (activation-frame-argument *val* last-arg-index))
(size (+ last-arg-index (length last-arg)))
(frame (allocate-activation-frame size)) )
(do ((i 1 (+ i 1)))
((= i last-arg-index))
(set-activation-frame-argument!
frame (- i 1) (activation-frame-argument *val* i) ) )
(do ((i (- last-arg-index 1) (+ i 1))
(last-arg last-arg (cdr last-arg)) )
((null? last-arg))
(set-activation-frame-argument! frame i (car last-arg)) )
(set! *val* frame)
(invoke proc) )
(wrong "Incorrect arity" 'apply) ) ) ) ) )
(definitial list
(make-primitive
(lambda ()
(let ((args-number (- (activation-frame-argument-length *val*) 1))
(result '()) )
(do ((i args-number (- i 1)))
((= i 0))
(set! result (cons (activation-frame-argument *val* (- i 1))
result )) )
(set! *val* result) ) ) ) )
(define *debug* #f)
(define (show-registers message)
(when *debug* (format #t "
----------------~A
PC = -~A
ENV = ~A
VAL = ~A
FUN = ~A
STACK = ~A~%" message (length *pc*)
*env* *val* *fun* (save-stack) ) )
)
(define (run)
(let ((instruction (car *pc*)))
(set! *pc* (cdr *pc*))
(instruction)
(run) ) )
(define install-regular-combinators
(let ((originals (map eval combinator-names)))
(lambda ()
(for-each (lambda (old-value name)
(eval `(set! ,name ',old-value)) )
originals
combinator-names ) ) ) )
(define (install-disassembling-combinators)
(for-each (lambda (name)
(eval `(set! ,name (lambda args (,name . ,args)))) )
combinator-names ) )
(define combinator-names
'( SHALLOW-ARGUMENT-REF
PREDEFINED
DEEP-ARGUMENT-REF
SET-SHALLOW-ARGUMENT
SET-DEEP-ARGUMENT!
GLOBAL-REF
CHECKED-GLOBAL-REF
SET-GLOBAL!
CONSTANT
JUMP-FALSE
GOTO
EXTEND-ENV
UNLINK-ENV
CALL0
INVOKE1
PUSH-VALUE
POP-ARG1
INVOKE2
POP-ARG2
INVOKE3
CREATE-CLOSURE
ARITY=?
RETURN
PACK-FRAME!
ARITY>=?
POP-FUNCTION
FUNCTION-INVOKE
PRESERVE-ENV
RESTORE-ENV
POP-FRAME!
POP-CONS-FRAME!
ALLOCATE-FRAME
ALLOCATE-DOTTED-FRAME
FINISH
) )
(define (chapter7c-interpreter)
(define (toplevel)
(define e (read))
(set! *env* sr.init)
(set! *val* #f)
(set! *fun* #f)
(set! *arg1* #f)
(set! *arg2* #f)
(set! *stack-index* 0)
(set! *pc* (append (meaning e r.init #t)
(FINISH) ))
(call/cc (lambda (exit)
(set! *exit* exit)
(run) ))
(display *val*)
(toplevel) )
(toplevel) )
(define (stand-alone-producer7c e)
(set! g.current (original.g.current))
(let* ((m (meaning e r.init #t))
(size (length g.current))
(global-names (map car (reverse g.current))) )
(lambda ()
(set! sg.current (make-vector size undefined-value))
(set! sg.current.names global-names)
(set! *env* sr.init)
(set! *val* #f)
(set! *fun* #f)
(set! *arg1* #f)
(set! *arg2* #f)
(set! *stack-index* 0)
(set! *pc* (append m (FINISH)))
(call/cc (lambda (exit)
(set! *exit* exit)
(run) )) ) ) )
(define (test-scheme7c file)
(suite-test
file
"Scheme? "
"Scheme= "
#t
(lambda (read check error)
(set! wrong error)
(set! static-wrong error)
(lambda ()
((stand-alone-producer7c (read)))
(check *val*) ) )
equal? ) )
(define *exit* #f)
|
e7e0a29e971bafe032d2bd9a07e8b22aba538059c38f63d8124fd840def772a4 | sdanzan/erlang-systools | testinotify.erl | %%% -------------------------------------------------------------------------
%%% Sample test / demonstration program.
Simply call testinotify : test ( ) in erlang shell and play around with the
%%% watched directory / file. To stop the program, just touch/create a file
%%% named 'stop' in the watched directory.
%%%
%%% ex.:
%%% test(".").
%%% test("/tmp", [ recursive ]).
%%% test("/tmp", [ { exclude, "toto" }, { events, [ close, attrib ] } ]).
-module(testinotify).
-export([test/0, test/1, test/2]).
-record(inotify, { event, isdir, file, watched }).
test() -> test(".").
test(Watched) -> test(Watched, []).
test(Watched, Options) ->
Wrapper = inotifywrapper:start(Watched, Options),
loop(Wrapper).
loop(Wrapper) ->
receive
{ Wrapper, Event } ->
case Event#inotify.event of
access -> io:format("~s was accessed (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
attrib -> io:format("~s metadata was modified (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
modify -> io:format("~s was modified (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
close_write -> io:format("~s was closed (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
close_nowrite -> io:format("~s was closed read only (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
open -> io:format("~s was opened (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
moved_to -> io:format("~s was moved to (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
moved_from -> io:format("~s was moved from (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
create -> io:format("~s was created (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
delete -> io:format("~s was deleted (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ])
end,
case Event#inotify.file of
"stop" -> inotifywrapper:close(Wrapper);
_ -> loop(Wrapper)
end
after 120000 ->
inotifywrapper:close(Wrapper), nok
end.
| null | https://raw.githubusercontent.com/sdanzan/erlang-systools/ced3faf1c807d36c528e53cbb366d69f464ff4e5/test/testinotify.erl | erlang | -------------------------------------------------------------------------
Sample test / demonstration program.
watched directory / file. To stop the program, just touch/create a file
named 'stop' in the watched directory.
ex.:
test(".").
test("/tmp", [ recursive ]).
test("/tmp", [ { exclude, "toto" }, { events, [ close, attrib ] } ]). | Simply call testinotify : test ( ) in erlang shell and play around with the
-module(testinotify).
-export([test/0, test/1, test/2]).
-record(inotify, { event, isdir, file, watched }).
test() -> test(".").
test(Watched) -> test(Watched, []).
test(Watched, Options) ->
Wrapper = inotifywrapper:start(Watched, Options),
loop(Wrapper).
loop(Wrapper) ->
receive
{ Wrapper, Event } ->
case Event#inotify.event of
access -> io:format("~s was accessed (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
attrib -> io:format("~s metadata was modified (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
modify -> io:format("~s was modified (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
close_write -> io:format("~s was closed (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
close_nowrite -> io:format("~s was closed read only (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
open -> io:format("~s was opened (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
moved_to -> io:format("~s was moved to (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
moved_from -> io:format("~s was moved from (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
create -> io:format("~s was created (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ]);
delete -> io:format("~s was deleted (dir: ~w).~n",
[ Event#inotify.file, Event#inotify.isdir ])
end,
case Event#inotify.file of
"stop" -> inotifywrapper:close(Wrapper);
_ -> loop(Wrapper)
end
after 120000 ->
inotifywrapper:close(Wrapper), nok
end.
|
6e2bfb276b566cd54ce8f4a7849c81bb2965ed88abdfc5bda276a9bf142d5b67 | jgm/grammata | main.hs | {-# LANGUAGE OverloadedStrings #-}
import Grammata.Parse (interpretDoc)
import Grammata.Util (showInterpreterError)
import Options.Applicative hiding (UnknownError)
import Control.Monad.RWS
import Language.Haskell.Interpreter -- hint
import Grammata.Types
import System.IO (stderr, hPutStrLn)
import Data.Char (toUpper, toLower)
import qualified Data.ByteString.Lazy as BL
data Options = Options
{ verbosity :: Int
, format :: String
, inpFile :: String
}
options :: Parser Options
options = Options
<$> option auto
( short 'v'
<> hidden
<> metavar "[1|2]"
<> value 0
<> help "Verbosity level" )
<*> argument str (metavar "FORMAT")
<*> argument str (metavar "FILE")
main :: IO ()
main = execParser options' >>= runWithOptions
where
options' = info (helper <*> options)
(briefDesc <> header "grammata - convert text using macros")
runWithOptions :: Options -> IO ()
runWithOptions opts = do
let formatFormat [] = []
formatFormat (x:xs) = toUpper x : map toLower xs
doc <- if (inpFile opts == "-")
then getContents
else readFile (inpFile opts)
r <- runInterpreter (interpretDoc (verbosity opts) doc
(formatFormat $ format opts))
case r of
Left e -> hPutStrLn stderr (showInterpreterError e)
Right x -> do
render x >>= liftIO . BL.putStr
liftIO $ BL.putStr "\n"
| null | https://raw.githubusercontent.com/jgm/grammata/3d8a09f5d287ecfe4611f51b603f51926e77811e/src/main.hs | haskell | # LANGUAGE OverloadedStrings #
hint |
import Grammata.Parse (interpretDoc)
import Grammata.Util (showInterpreterError)
import Options.Applicative hiding (UnknownError)
import Control.Monad.RWS
import Grammata.Types
import System.IO (stderr, hPutStrLn)
import Data.Char (toUpper, toLower)
import qualified Data.ByteString.Lazy as BL
data Options = Options
{ verbosity :: Int
, format :: String
, inpFile :: String
}
options :: Parser Options
options = Options
<$> option auto
( short 'v'
<> hidden
<> metavar "[1|2]"
<> value 0
<> help "Verbosity level" )
<*> argument str (metavar "FORMAT")
<*> argument str (metavar "FILE")
main :: IO ()
main = execParser options' >>= runWithOptions
where
options' = info (helper <*> options)
(briefDesc <> header "grammata - convert text using macros")
runWithOptions :: Options -> IO ()
runWithOptions opts = do
let formatFormat [] = []
formatFormat (x:xs) = toUpper x : map toLower xs
doc <- if (inpFile opts == "-")
then getContents
else readFile (inpFile opts)
r <- runInterpreter (interpretDoc (verbosity opts) doc
(formatFormat $ format opts))
case r of
Left e -> hPutStrLn stderr (showInterpreterError e)
Right x -> do
render x >>= liftIO . BL.putStr
liftIO $ BL.putStr "\n"
|
c6018c1f77f7f994c736ad7687dc09ff5c7df6bf585911385ff946ffc87124f6 | lambdaisland/ansi | cljs_test_runner.clj | (ns lambdaisland.ansi.cljs-test-runner
(:gen-class)
(:require [doo.core :as doo]
[cljs.build.api :as cljs]))
(def cljs-config {:main 'lambdaisland.ansi-test
:output-to "out/testable.js"
:output-dir "out"
:optimizations :simple
:target :nodejs})
(defn -main [& args]
(cljs/build ["src" "test"] cljs-config)
(let [{:keys [exit] :as res}
(doo/run-script :node cljs-config {:debug true})]
(System/exit exit)))
| null | https://raw.githubusercontent.com/lambdaisland/ansi/76a239f820e8f85790cf5b578ad0f08c8e24cf75/test/lambdaisland/ansi/cljs_test_runner.clj | clojure | (ns lambdaisland.ansi.cljs-test-runner
(:gen-class)
(:require [doo.core :as doo]
[cljs.build.api :as cljs]))
(def cljs-config {:main 'lambdaisland.ansi-test
:output-to "out/testable.js"
:output-dir "out"
:optimizations :simple
:target :nodejs})
(defn -main [& args]
(cljs/build ["src" "test"] cljs-config)
(let [{:keys [exit] :as res}
(doo/run-script :node cljs-config {:debug true})]
(System/exit exit)))
|
|
e4a391542e35204d1ae281308741c1fc3bce7238906ba3a982d851fae43660ab | huangjs/cl | mring.lisp | A Maxima ring stucture
Copyright ( C ) 2005 , 2007 ,
Barton Willis
Department of Mathematics
University of Nebraska at Kearney
;; Kearney NE 68847
;;
;; This source code is licensed under the terms of the Lisp Lesser
GNU Public License ( LLGPL ) . The LLGPL consists of a preamble , published
by Franz Inc. ( ) , and the GNU
Library General Public License ( LGPL ) , version 2 , or ( at your option )
;; any later version. When the preamble conflicts with the LGPL,
;; the preamble takes precedence.
;; This library is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty of
;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for details .
You should have received a copy of the GNU Library General Public
;; License along with this library; if not, write to the
Free Software Foundation , Inc. , 51 Franklin St , Fifth Floor ,
Boston , MA 02110 - 1301 , USA .
;; Let's have version numbers 1,2,3,...
(eval-when (:compile-toplevel :load-toplevel :execute)
($put '$mring 1 '$version))
;; (1) In maxima-grobner.lisp, there is a structure 'ring.'
( 2 ) Some functions in this structure , for example ' great ' might
;; not be defined for a ring; when this is the case, a function
;; can signal an error.
( 3 ) Floating point addition is n't associative ; so a mring need n't
;; be a ring. But a mring is 'close' to being a ring.
;; Description of the mring fields:
(defstruct mring
name
coerce-to-lisp-float
abs
great
add
div
rdiv
reciprocal
mult
sub
negate
psqrt
add-id
mult-id
fzerop
adjoint
maxima-to-mring
mring-to-maxima)
(eval-when (:compile-toplevel :load-toplevel :execute)
(defmvar $%mrings `((mlist) $floatfield $complexfield $rationalfield $crering $generalring $bigfloatfield
$runningerror $noncommutingring)))
(defun $require_ring (ringname pos fun)
(if ($member ringname $%mrings) (get ringname 'ring)
(merror "The ~:M argument of the function '~:M' must be the name of a ring" pos fun)))
(defparameter *floatfield*
(make-mring
:name '$floatfield
:coerce-to-lisp-float #'cl:identity
:abs #'abs
:great #'>
:add #'+
:div #'/
:rdiv #'/
:reciprocal #'/
:mult #'*
:sub #'-
:negate #'-
:psqrt #'(lambda (s) (if (>= s 0) (cl:sqrt s) nil))
:add-id #'(lambda () 0.0)
:mult-id #'(lambda () 1.0)
:fzerop #'(lambda (s) (< (abs s) (* 4 flonum-epsilon)))
:adjoint #'cl:identity
:mring-to-maxima #'cl:identity
:maxima-to-mring #'(lambda (s)
(setq s ($float s))
(if (floatp s) s (merror "Unable to convert ~:M to a long float" s)))))
(setf (get '$floatfield 'ring) *floatfield*)
(defparameter *complexfield*
(make-mring
:name '$complexfield
:coerce-to-lisp-float #'cl:identity
:abs #'abs
:great #'>
:add #'+
:div #'/
:rdiv #'/
:reciprocal #'/
:mult #'*
:sub #'-
:negate #'-
:psqrt #'(lambda (s) (if (and (= 0 (imagpart s)) (>= (realpart s) 0)) (cl:sqrt s) nil))
:add-id #'(lambda () 0.0)
:mult-id #'(lambda () 1.0)
:fzerop #'(lambda (s) (< (abs s) (* 4 flonum-epsilon)))
:adjoint #'cl:conjugate
was
:maxima-to-mring #'(lambda (s)
(progn
(setq s ($rectform (meval s)))
(if (complex-number-p s 'float-or-rational-p)
(complex ($float ($realpart s)) ($float ($imagpart s)))
(merror "Unable to convert ~:M to a complex long float" s))))))
(setf (get '$complexfield 'ring) *complexfield*)
(defparameter *rationalfield*
(make-mring
:name '$rationalfield
:coerce-to-lisp-float #'(lambda (s) ($float s))
:abs #'abs
:great #'>
:add #'+
:div #'/
:rdiv #'/
:reciprocal #'/
:mult #'*
:sub #'-
:negate #'-
:psqrt #'(lambda (s) (let ((x))
(cond ((>= s 0)
(setq x (isqrt (numerator s)))
(setq x (/ x (isqrt (denominator s))))
(if (= s (* x x)) x nil))
(t nil))))
:add-id #'(lambda () 0)
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (= s 0))
:adjoint #'cl:identity
:mring-to-maxima #'(lambda (s) (simplify `((rat) ,(numerator s) ,(denominator s))))
:maxima-to-mring
#'(lambda (s)
(if (or (floatp s) ($bfloatp s)) (setq s ($rationalize s)))
(if ($ratnump s) (if (integerp s) s (/ ($num s) ($denom s)))
(merror "Unable to convert ~:M to a rational number" s)))))
(setf (get '$rationalfield 'ring) *rationalfield*)
(defparameter *crering*
(make-mring
:name '$crering
:coerce-to-lisp-float nil
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'(lambda (a b) (declare (ignore a)) (eq t (meqp b 0)))
:add #'add
:div #'div
:rdiv #'div
:reciprocal #'(lambda (s) (div 1 s))
:mult #'mult
:sub #'sub
:negate #'(lambda (s) (mult -1 s))
:psqrt #'(lambda (s) (if (member (csign ($ratdisrep s)) `($pos $pz $zero)) (take '(%sqrt) s) nil))
:add-id #'(lambda () 0)
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (eq t (meqp s 0)))
:adjoint #'(lambda (s) (take '($conjugate) s))
:mring-to-maxima #'(lambda (s) s)
:maxima-to-mring #'(lambda (s) ($rat s))))
(setf (get '$crering 'ring) *crering*)
(defparameter *generalring*
(make-mring
:name '$generalring
:coerce-to-lisp-float nil
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'(lambda (a b) (declare (ignore a)) (eq t (meqp b 0)))
:add #'(lambda (a b) ($rectform (add a b)))
:div #'(lambda (a b) ($rectform (div a b)))
:rdiv #'(lambda (a b) ($rectform (div a b)))
:reciprocal #'(lambda (s) (div 1 s))
:mult #'(lambda (a b) ($rectform (mult a b)))
:sub #'(lambda (a b) ($rectform (sub a b)))
:negate #'(lambda (a) (mult -1 a))
:psqrt #'(lambda (s) (if (member (csign s) `($pos $pz $zero)) (take '(%sqrt) s) nil))
:add-id #'(lambda () 0)
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (eq t (meqp s 0)))
:adjoint #'(lambda (s) (take '($conjugate) s))
:mring-to-maxima #'(lambda (s) s)
:maxima-to-mring #'(lambda (s) s)))
(setf (get '$generalring 'ring) *generalring*)
(defparameter *bigfloatfield*
(make-mring
:name '$bigfloatfield
:coerce-to-lisp-float #'(lambda (s)
(setq s ($rectform ($float s)))
(complex ($realpart s) ($imagpart s)))
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'mgrp
:add #'(lambda (a b) ($rectform (add a b)))
:div #'(lambda (a b) ($rectform (div a b)))
:rdiv #'(lambda (a b) ($rectform (div a b)))
:reciprocal #'(lambda (s) (div 1 s))
:mult #'(lambda (a b) ($rectform (mult a b)))
:sub #'(lambda (a b) ($rectform (sub a b)))
:negate #'(lambda (a) (mult -1 a))
:psqrt #'(lambda (s) (if (mlsp s 0) nil (take '(%sqrt) s)))
:add-id #'(lambda () bigfloatzero)
:mult-id #'(lambda () bigfloatone)
:fzerop #'(lambda (s) (like s bigfloatzero))
:adjoint #'cl:identity
:mring-to-maxima #'(lambda (s) s)
:maxima-to-mring #'(lambda (s)
(setq s ($rectform ($bfloat s)))
(if (or (eq s '$%i) (complex-number-p s 'bigfloat-or-number-p)) s
(merror "Unable to convert matrix entry to a big float")))))
(setf (get '$bigfloatfield 'ring) *bigfloatfield*)
(defun fp-abs (a)
(list (abs (first a)) (second a)))
(defun fp+ (a b)
(cond ((= (first a) 0.0) b)
((= (first b) 0.0) a)
(t
(let ((s (+ (first a) (first b))))
(if (= 0.0 s) (merror "floating point divide by zero"))
(list s (ceiling (+ 1
(abs (/ (* (first a) (second a)) s))
(abs (/ (* (first b) (second b)) s)))))))))
(defun fp- (a b)
(cond ((= (first a) 0.0) (list (- (first b)) (second b)))
((= (first b) 0.0) a)
(t
(let ((s (- (first a) (first b))))
(if (= 0.0 s) (merror "floating point divide by zero"))
(list s (ceiling (+ 1
(abs (/ (* (first a) (second a)) s))
(abs (/ (* (first b) (second b)) s)))))))))
(defun fp* (a b)
(if (or (= (first a) 0.0) (= (first b) 0.0)) (list 0.0 0)
(list (* (first a) (first b)) (+ 1 (second a) (second b)))))
(defun fp/ (a b)
(if (= (first a) 0) (list 0.0 0)
(list (/ (first a) (first b)) (+ 1 (second a) (second b)))))
(defun $addmatrices(fn &rest m)
(mfuncall '$apply '$matrixmap `((mlist) ,fn ,@m)))
(defparameter *runningerror*
(make-mring
:name '$runningerror
:coerce-to-lisp-float #'(lambda (s) (if (consp s) (first s) s))
:abs #'fp-abs
:great #'(lambda (a b) (> (first a) (first b)))
:add #'fp+
:div #'fp/
:rdiv #'fp/
:reciprocal #'(lambda (s) (fp/ (list 1 0) s))
:mult #'fp*
:sub #'fp-
:negate #'(lambda (s) (list (- (first s)) (second s)))
:psqrt #'(lambda (s) (if (> (first s) 0) (list (cl:sqrt (first s)) (+ 1 (second s))) nil))
:add-id #'(lambda () (list 0 0))
:mult-id #'(lambda () (list 1 0))
:fzerop #'(lambda (s) (like (first s) 0))
:adjoint #'cl:identity
:mring-to-maxima #'(lambda (s) `((mlist) ,@s))
:maxima-to-mring #'(lambda (s) (if ($listp s) (cdr s) (list ($float s) 1)))))
(setf (get '$runningerror 'ring) *runningerror*)
(defparameter *noncommutingring*
(make-mring
:name '$noncommutingring
:coerce-to-lisp-float nil
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'(lambda (a b) (declare (ignore a)) (eq t (meqp b 0)))
:add #'(lambda (a b) (add a b))
:div #'(lambda (a b) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(setq b (if ($matrixp b) ($invert_by_lu b '$noncommutingring)
(take '(mncexpt) b -1)))
(take '(mnctimes) a b))))
:rdiv #'(lambda (a b) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(setq b (if ($matrixp b) ($invert_by_lu b '$noncommutingring)
(take '(mncexpt) b -1)))
(take '(mnctimes) b a))))
:reciprocal #'(lambda (s) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(if ($matrixp s) ($invert_by_lu s '$noncommutingring)
(take '(mncexpt) s -1)))))
:mult #'(lambda (a b) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(take '(mnctimes) a b))))
:sub #'(lambda (a b) (sub a b))
:negate #'(lambda (a) (mult -1 a))
:add-id #'(lambda () 0)
:psqrt #'(lambda (s) (take '(%sqrt) s))
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (eq t (meqp s 0)))
:adjoint #'(lambda (s) ($transpose (take '($conjugate) s)))
:mring-to-maxima #'cl:identity
:maxima-to-mring #'cl:identity))
(setf (get '$noncommutingring 'ring) *noncommutingring*)
(defun ring-eval (e fld)
(let ((fadd (mring-add fld))
(fnegate (mring-negate fld))
(fmult (mring-mult fld))
(fdiv (mring-div fld))
(fabs (mring-abs fld))
(fconvert (mring-maxima-to-mring fld)))
(cond ((or ($numberp e) (symbolp e))
(funcall fconvert (meval e)))
;; I don't think an empty sum or product is possible here. If it is, append
;; the appropriate initial-value to reduce. Using the :inital-value isn't
;; a problem, but (fp* (a b) (1 0)) --> (a (+ b 1)). A better value is
;; (fp* (a b) (1 0)) --> (a b).
((op-equalp e 'mplus)
(reduce fadd (mapcar #'(lambda (s) (ring-eval s fld)) (margs e)) :from-end t))
((op-equalp e 'mminus)
(funcall fnegate (ring-eval (first (margs e)) fld)))
((op-equalp e 'mtimes)
(reduce fmult (mapcar #'(lambda (s) (ring-eval s fld)) (margs e)) :from-end t))
((op-equalp e 'mquotient)
(funcall fdiv (ring-eval (first (margs e)) fld)(ring-eval (second (margs e)) fld)))
((op-equalp e 'mabs) (funcall fabs (ring-eval (first (margs e)) fld)))
((and (or (eq (mring-name fld) '$floatfield) (eq (mring-name fld) '$complexfield))
(consp e) (consp (car e)) (gethash (mop e) *flonum-op*))
(apply (gethash (mop e) *flonum-op*) (mapcar #'(lambda (s) (ring-eval s fld)) (margs e))))
(t (merror "Unable to evaluate ~:M in the ring '~:M'" e (mring-name fld))))))
(defmspec $ringeval (e)
(let ((fld (get (or (car (member (nth 2 e) $%mrings)) '$generalring) 'ring)))
(funcall (mring-mring-to-maxima fld) (ring-eval (nth 1 e) fld))))
| null | https://raw.githubusercontent.com/huangjs/cl/96158b3f82f82a6b7d53ef04b3b29c5c8de2dbf7/lib/maxima/share/linearalgebra/mring.lisp | lisp | Kearney NE 68847
This source code is licensed under the terms of the Lisp Lesser
any later version. When the preamble conflicts with the LGPL,
the preamble takes precedence.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
License along with this library; if not, write to the
Let's have version numbers 1,2,3,...
(1) In maxima-grobner.lisp, there is a structure 'ring.'
not be defined for a ring; when this is the case, a function
can signal an error.
so a mring need n't
be a ring. But a mring is 'close' to being a ring.
Description of the mring fields:
I don't think an empty sum or product is possible here. If it is, append
the appropriate initial-value to reduce. Using the :inital-value isn't
a problem, but (fp* (a b) (1 0)) --> (a (+ b 1)). A better value is
(fp* (a b) (1 0)) --> (a b). | A Maxima ring stucture
Copyright ( C ) 2005 , 2007 ,
Barton Willis
Department of Mathematics
University of Nebraska at Kearney
GNU Public License ( LLGPL ) . The LLGPL consists of a preamble , published
by Franz Inc. ( ) , and the GNU
Library General Public License ( LGPL ) , version 2 , or ( at your option )
Library General Public License for details .
You should have received a copy of the GNU Library General Public
Free Software Foundation , Inc. , 51 Franklin St , Fifth Floor ,
Boston , MA 02110 - 1301 , USA .
(eval-when (:compile-toplevel :load-toplevel :execute)
($put '$mring 1 '$version))
( 2 ) Some functions in this structure , for example ' great ' might
(defstruct mring
name
coerce-to-lisp-float
abs
great
add
div
rdiv
reciprocal
mult
sub
negate
psqrt
add-id
mult-id
fzerop
adjoint
maxima-to-mring
mring-to-maxima)
(eval-when (:compile-toplevel :load-toplevel :execute)
(defmvar $%mrings `((mlist) $floatfield $complexfield $rationalfield $crering $generalring $bigfloatfield
$runningerror $noncommutingring)))
(defun $require_ring (ringname pos fun)
(if ($member ringname $%mrings) (get ringname 'ring)
(merror "The ~:M argument of the function '~:M' must be the name of a ring" pos fun)))
(defparameter *floatfield*
(make-mring
:name '$floatfield
:coerce-to-lisp-float #'cl:identity
:abs #'abs
:great #'>
:add #'+
:div #'/
:rdiv #'/
:reciprocal #'/
:mult #'*
:sub #'-
:negate #'-
:psqrt #'(lambda (s) (if (>= s 0) (cl:sqrt s) nil))
:add-id #'(lambda () 0.0)
:mult-id #'(lambda () 1.0)
:fzerop #'(lambda (s) (< (abs s) (* 4 flonum-epsilon)))
:adjoint #'cl:identity
:mring-to-maxima #'cl:identity
:maxima-to-mring #'(lambda (s)
(setq s ($float s))
(if (floatp s) s (merror "Unable to convert ~:M to a long float" s)))))
(setf (get '$floatfield 'ring) *floatfield*)
(defparameter *complexfield*
(make-mring
:name '$complexfield
:coerce-to-lisp-float #'cl:identity
:abs #'abs
:great #'>
:add #'+
:div #'/
:rdiv #'/
:reciprocal #'/
:mult #'*
:sub #'-
:negate #'-
:psqrt #'(lambda (s) (if (and (= 0 (imagpart s)) (>= (realpart s) 0)) (cl:sqrt s) nil))
:add-id #'(lambda () 0.0)
:mult-id #'(lambda () 1.0)
:fzerop #'(lambda (s) (< (abs s) (* 4 flonum-epsilon)))
:adjoint #'cl:conjugate
was
:maxima-to-mring #'(lambda (s)
(progn
(setq s ($rectform (meval s)))
(if (complex-number-p s 'float-or-rational-p)
(complex ($float ($realpart s)) ($float ($imagpart s)))
(merror "Unable to convert ~:M to a complex long float" s))))))
(setf (get '$complexfield 'ring) *complexfield*)
(defparameter *rationalfield*
(make-mring
:name '$rationalfield
:coerce-to-lisp-float #'(lambda (s) ($float s))
:abs #'abs
:great #'>
:add #'+
:div #'/
:rdiv #'/
:reciprocal #'/
:mult #'*
:sub #'-
:negate #'-
:psqrt #'(lambda (s) (let ((x))
(cond ((>= s 0)
(setq x (isqrt (numerator s)))
(setq x (/ x (isqrt (denominator s))))
(if (= s (* x x)) x nil))
(t nil))))
:add-id #'(lambda () 0)
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (= s 0))
:adjoint #'cl:identity
:mring-to-maxima #'(lambda (s) (simplify `((rat) ,(numerator s) ,(denominator s))))
:maxima-to-mring
#'(lambda (s)
(if (or (floatp s) ($bfloatp s)) (setq s ($rationalize s)))
(if ($ratnump s) (if (integerp s) s (/ ($num s) ($denom s)))
(merror "Unable to convert ~:M to a rational number" s)))))
(setf (get '$rationalfield 'ring) *rationalfield*)
(defparameter *crering*
(make-mring
:name '$crering
:coerce-to-lisp-float nil
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'(lambda (a b) (declare (ignore a)) (eq t (meqp b 0)))
:add #'add
:div #'div
:rdiv #'div
:reciprocal #'(lambda (s) (div 1 s))
:mult #'mult
:sub #'sub
:negate #'(lambda (s) (mult -1 s))
:psqrt #'(lambda (s) (if (member (csign ($ratdisrep s)) `($pos $pz $zero)) (take '(%sqrt) s) nil))
:add-id #'(lambda () 0)
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (eq t (meqp s 0)))
:adjoint #'(lambda (s) (take '($conjugate) s))
:mring-to-maxima #'(lambda (s) s)
:maxima-to-mring #'(lambda (s) ($rat s))))
(setf (get '$crering 'ring) *crering*)
(defparameter *generalring*
(make-mring
:name '$generalring
:coerce-to-lisp-float nil
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'(lambda (a b) (declare (ignore a)) (eq t (meqp b 0)))
:add #'(lambda (a b) ($rectform (add a b)))
:div #'(lambda (a b) ($rectform (div a b)))
:rdiv #'(lambda (a b) ($rectform (div a b)))
:reciprocal #'(lambda (s) (div 1 s))
:mult #'(lambda (a b) ($rectform (mult a b)))
:sub #'(lambda (a b) ($rectform (sub a b)))
:negate #'(lambda (a) (mult -1 a))
:psqrt #'(lambda (s) (if (member (csign s) `($pos $pz $zero)) (take '(%sqrt) s) nil))
:add-id #'(lambda () 0)
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (eq t (meqp s 0)))
:adjoint #'(lambda (s) (take '($conjugate) s))
:mring-to-maxima #'(lambda (s) s)
:maxima-to-mring #'(lambda (s) s)))
(setf (get '$generalring 'ring) *generalring*)
(defparameter *bigfloatfield*
(make-mring
:name '$bigfloatfield
:coerce-to-lisp-float #'(lambda (s)
(setq s ($rectform ($float s)))
(complex ($realpart s) ($imagpart s)))
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'mgrp
:add #'(lambda (a b) ($rectform (add a b)))
:div #'(lambda (a b) ($rectform (div a b)))
:rdiv #'(lambda (a b) ($rectform (div a b)))
:reciprocal #'(lambda (s) (div 1 s))
:mult #'(lambda (a b) ($rectform (mult a b)))
:sub #'(lambda (a b) ($rectform (sub a b)))
:negate #'(lambda (a) (mult -1 a))
:psqrt #'(lambda (s) (if (mlsp s 0) nil (take '(%sqrt) s)))
:add-id #'(lambda () bigfloatzero)
:mult-id #'(lambda () bigfloatone)
:fzerop #'(lambda (s) (like s bigfloatzero))
:adjoint #'cl:identity
:mring-to-maxima #'(lambda (s) s)
:maxima-to-mring #'(lambda (s)
(setq s ($rectform ($bfloat s)))
(if (or (eq s '$%i) (complex-number-p s 'bigfloat-or-number-p)) s
(merror "Unable to convert matrix entry to a big float")))))
(setf (get '$bigfloatfield 'ring) *bigfloatfield*)
(defun fp-abs (a)
(list (abs (first a)) (second a)))
(defun fp+ (a b)
(cond ((= (first a) 0.0) b)
((= (first b) 0.0) a)
(t
(let ((s (+ (first a) (first b))))
(if (= 0.0 s) (merror "floating point divide by zero"))
(list s (ceiling (+ 1
(abs (/ (* (first a) (second a)) s))
(abs (/ (* (first b) (second b)) s)))))))))
(defun fp- (a b)
(cond ((= (first a) 0.0) (list (- (first b)) (second b)))
((= (first b) 0.0) a)
(t
(let ((s (- (first a) (first b))))
(if (= 0.0 s) (merror "floating point divide by zero"))
(list s (ceiling (+ 1
(abs (/ (* (first a) (second a)) s))
(abs (/ (* (first b) (second b)) s)))))))))
(defun fp* (a b)
(if (or (= (first a) 0.0) (= (first b) 0.0)) (list 0.0 0)
(list (* (first a) (first b)) (+ 1 (second a) (second b)))))
(defun fp/ (a b)
(if (= (first a) 0) (list 0.0 0)
(list (/ (first a) (first b)) (+ 1 (second a) (second b)))))
(defun $addmatrices(fn &rest m)
(mfuncall '$apply '$matrixmap `((mlist) ,fn ,@m)))
(defparameter *runningerror*
(make-mring
:name '$runningerror
:coerce-to-lisp-float #'(lambda (s) (if (consp s) (first s) s))
:abs #'fp-abs
:great #'(lambda (a b) (> (first a) (first b)))
:add #'fp+
:div #'fp/
:rdiv #'fp/
:reciprocal #'(lambda (s) (fp/ (list 1 0) s))
:mult #'fp*
:sub #'fp-
:negate #'(lambda (s) (list (- (first s)) (second s)))
:psqrt #'(lambda (s) (if (> (first s) 0) (list (cl:sqrt (first s)) (+ 1 (second s))) nil))
:add-id #'(lambda () (list 0 0))
:mult-id #'(lambda () (list 1 0))
:fzerop #'(lambda (s) (like (first s) 0))
:adjoint #'cl:identity
:mring-to-maxima #'(lambda (s) `((mlist) ,@s))
:maxima-to-mring #'(lambda (s) (if ($listp s) (cdr s) (list ($float s) 1)))))
(setf (get '$runningerror 'ring) *runningerror*)
(defparameter *noncommutingring*
(make-mring
:name '$noncommutingring
:coerce-to-lisp-float nil
:abs #'(lambda (s) (simplify (mfuncall '$cabs s)))
:great #'(lambda (a b) (declare (ignore a)) (eq t (meqp b 0)))
:add #'(lambda (a b) (add a b))
:div #'(lambda (a b) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(setq b (if ($matrixp b) ($invert_by_lu b '$noncommutingring)
(take '(mncexpt) b -1)))
(take '(mnctimes) a b))))
:rdiv #'(lambda (a b) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(setq b (if ($matrixp b) ($invert_by_lu b '$noncommutingring)
(take '(mncexpt) b -1)))
(take '(mnctimes) b a))))
:reciprocal #'(lambda (s) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(if ($matrixp s) ($invert_by_lu s '$noncommutingring)
(take '(mncexpt) s -1)))))
:mult #'(lambda (a b) (progn
(let (($matrix_element_mult ".")
($matrix_element_transpose '$transpose))
(take '(mnctimes) a b))))
:sub #'(lambda (a b) (sub a b))
:negate #'(lambda (a) (mult -1 a))
:add-id #'(lambda () 0)
:psqrt #'(lambda (s) (take '(%sqrt) s))
:mult-id #'(lambda () 1)
:fzerop #'(lambda (s) (eq t (meqp s 0)))
:adjoint #'(lambda (s) ($transpose (take '($conjugate) s)))
:mring-to-maxima #'cl:identity
:maxima-to-mring #'cl:identity))
(setf (get '$noncommutingring 'ring) *noncommutingring*)
(defun ring-eval (e fld)
(let ((fadd (mring-add fld))
(fnegate (mring-negate fld))
(fmult (mring-mult fld))
(fdiv (mring-div fld))
(fabs (mring-abs fld))
(fconvert (mring-maxima-to-mring fld)))
(cond ((or ($numberp e) (symbolp e))
(funcall fconvert (meval e)))
((op-equalp e 'mplus)
(reduce fadd (mapcar #'(lambda (s) (ring-eval s fld)) (margs e)) :from-end t))
((op-equalp e 'mminus)
(funcall fnegate (ring-eval (first (margs e)) fld)))
((op-equalp e 'mtimes)
(reduce fmult (mapcar #'(lambda (s) (ring-eval s fld)) (margs e)) :from-end t))
((op-equalp e 'mquotient)
(funcall fdiv (ring-eval (first (margs e)) fld)(ring-eval (second (margs e)) fld)))
((op-equalp e 'mabs) (funcall fabs (ring-eval (first (margs e)) fld)))
((and (or (eq (mring-name fld) '$floatfield) (eq (mring-name fld) '$complexfield))
(consp e) (consp (car e)) (gethash (mop e) *flonum-op*))
(apply (gethash (mop e) *flonum-op*) (mapcar #'(lambda (s) (ring-eval s fld)) (margs e))))
(t (merror "Unable to evaluate ~:M in the ring '~:M'" e (mring-name fld))))))
(defmspec $ringeval (e)
(let ((fld (get (or (car (member (nth 2 e) $%mrings)) '$generalring) 'ring)))
(funcall (mring-mring-to-maxima fld) (ring-eval (nth 1 e) fld))))
|
04c92c7feb3a26c257237bb942c0d46b43d0a5e2c36e43a2eac943e1a6baebdc | haroldcarr/plutus-pioneer-program-3 | StateMachine.hs | {-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
# LANGUAGE MultiParamTypeClasses #
# LANGUAGE NoImplicitPrelude #
# LANGUAGE OverloadedStrings #
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TemplateHaskell #-}
# LANGUAGE TypeApplications #
# LANGUAGE TypeFamilies #
{-# LANGUAGE TypeOperators #-}
module Week07.StateMachine
( Game (..)
, GameChoice (..)
, FirstParams (..)
, SecondParams (..)
, GameSchema
, Last (..)
, ThreadToken
, Text
, endpoints
) where
import Control.Monad hiding (fmap)
import Data.Aeson (FromJSON, ToJSON)
import Data.Monoid (Last (..))
import Data.Text (Text, pack)
import GHC.Generics (Generic)
import Ledger hiding (singleton)
import Ledger.Ada as Ada
import Ledger.Constraints as Constraints
import Ledger.Typed.Tx
import qualified Ledger.Typed.Scripts as Scripts
import Plutus.Contract as Contract
import Plutus.Contract.StateMachine
import qualified PlutusTx
import PlutusTx.Prelude hiding (Semigroup(..), check, unless)
import Playground.Contract (ToSchema)
import Prelude (Semigroup (..), Show (..), String)
import qualified Prelude
------------------------------------------------------------------------------
-- on-chain
data Game = Game
{ gFirst :: !PaymentPubKeyHash
, gSecond :: !PaymentPubKeyHash
, gStake :: !Integer
, gPlayDeadline :: !POSIXTime
, gRevealDeadline :: !POSIXTime
DIFF : use StateMachine 's method
} deriving (Show, Generic, FromJSON, ToJSON, Prelude.Eq)
PlutusTx.makeLift ''Game
data GameChoice = Zero | One
deriving (Show, Generic, FromJSON, ToJSON, ToSchema, Prelude.Eq, Prelude.Ord)
instance Eq GameChoice where
# INLINABLE (= =) #
Zero == Zero = True
One == One = True
_ == _ = False
PlutusTx.unstableMakeIsData ''GameChoice
data GameDatum
= GameDatum BuiltinByteString (Maybe GameChoice)
DIFF : new constructor
deriving Show
instance Eq GameDatum where
# INLINABLE (= =) #
GameDatum bs mc == GameDatum bs' mc' = (bs == bs') && (mc == mc')
Finished == Finished = True
_ == _ = False
PlutusTx.unstableMakeIsData ''GameDatum
data GameRedeemer = Play GameChoice | Reveal BuiltinByteString | ClaimFirst | ClaimSecond
deriving Show
PlutusTx.unstableMakeIsData ''GameRedeemer
# INLINABLE lovelaces #
lovelaces :: Value -> Integer
lovelaces = Ada.getLovelace . Ada.fromValue
# INLINABLE gameDatum #
gameDatum :: TxOut -> (DatumHash -> Maybe Datum) -> Maybe GameDatum
gameDatum o f = do
dh <- txOutDatum o
Datum d <- f dh
PlutusTx.fromBuiltinData d
# INLINABLE transition #
-- Transition function of state machine, somewhat corresponds to previous validator.
Core business logic .
--
-- from StateMachine Nothing if illegal transition
-- v Just with constraints and new state
transition :: Game -> State GameDatum -> GameRedeemer -> Maybe (TxConstraints Void Void, State GameDatum)
Datum
-- v
transition game s r = case (stateValue s, stateData s, r) of
DIFF : no check for state token because StateMachine takes care of that behind the scenes
1st player has previusly moved , the 2nd player is MOVING , chosing ' c '
(v, GameDatum bs Nothing, Play c)
-- 2 1
| lovelaces v == gStake game -> Just ( Constraints.mustBeSignedBy (gSecond game) <>
5
Constraints.mustValidateIn (to $ gPlayDeadline game)
4 3
, State (GameDatum bs $ Just c) (lovelaceValueOf $ 2 * gStake game)
)
both players have previously moved , 1st player wins , so reveals to collect stake
DIFF : nothing corresponding to ' checkNonce ' here because it can not be expressed as a constraint .
-- See definition and use of 'check' below.
DIFF : no ' nftToFirst ' : in StateMachine the state token auto minted when machine started , auto burned when finished
(v, GameDatum _ (Just _), Reveal _)
| lovelaces v == (2 * gStake game) -> Just ( Constraints.mustBeSignedBy (gFirst game) <>
Constraints.mustValidateIn (to $ gRevealDeadline game)
, State Finished mempty
)
2nd player has not previously moved , 1st player sees that deadline past , so reclaims their stake
(v, GameDatum _ Nothing, ClaimFirst)
| lovelaces v == gStake game -> Just ( Constraints.mustBeSignedBy (gFirst game) <>
Constraints.mustValidateIn (from $ 1 + gPlayDeadline game)
, State Finished mempty
)
both players have previously moved , the 1st player sees they have lost so remains quiet ,
the 2nd player sees reveal deadline has past , so claims game stake ( wins ) .
(v, GameDatum _ (Just _), ClaimSecond)
| lovelaces v == (2 * gStake game) -> Just ( Constraints.mustBeSignedBy (gSecond game) <>
Constraints.mustValidateIn (from $ 1 + gRevealDeadline game)
, State Finished mempty
)
-- any other transitions are not legal
_ -> Nothing
# INLINABLE final #
final :: GameDatum -> Bool
final Finished = True
final _ = False
# INLINABLE check #
-- checkNonce
check :: BuiltinByteString -> BuiltinByteString -> GameDatum -> GameRedeemer -> ScriptContext -> Bool
check bsZero' bsOne' (GameDatum bs (Just c)) (Reveal nonce) _ =
sha2_256 (nonce `appendByteString` if c == Zero then bsZero' else bsOne') == bs
check _ _ _ _ _ = True
# INLINABLE gameStateMachine #
gameStateMachine :: Game -> BuiltinByteString -> BuiltinByteString -> StateMachine GameDatum GameRedeemer
gameStateMachine game bsZero' bsOne' = StateMachine
{ smTransition = transition game
, smFinal = final
, smCheck = check bsZero' bsOne'
, smThreadToken = Just $ gToken game
}
# INLINABLE mkGameValidator #
mkGameValidator :: Game -> BuiltinByteString -> BuiltinByteString -> GameDatum -> GameRedeemer -> ScriptContext -> Bool
mkGameValidator game bsZero' bsOne' = mkValidator $ gameStateMachine game bsZero' bsOne'
type Gaming = StateMachine GameDatum GameRedeemer
bsZero, bsOne :: BuiltinByteString
bsZero = "0"
bsOne = "1"
-- for off-chain
gameStateMachine' :: Game -> StateMachine GameDatum GameRedeemer
gameStateMachine' game = gameStateMachine game bsZero bsOne
typedGameValidator :: Game -> Scripts.TypedValidator Gaming
typedGameValidator game = Scripts.mkTypedValidator @Gaming
($$(PlutusTx.compile [|| mkGameValidator ||])
`PlutusTx.applyCode` PlutusTx.liftCode game
`PlutusTx.applyCode` PlutusTx.liftCode bsZero
`PlutusTx.applyCode` PlutusTx.liftCode bsOne)
$$(PlutusTx.compile [|| wrap ||])
where
wrap = Scripts.wrapValidator @GameDatum @GameRedeemer
gameValidator :: Game -> Validator
gameValidator = Scripts.validatorScript . typedGameValidator
gameAddress :: Game -> Ledger.Address
gameAddress = scriptAddress . gameValidator
------------------------------------------------------------------------------
-- off-chain
gameClient :: Game -> StateMachineClient GameDatum GameRedeemer
gameClient game = mkStateMachineClient $ StateMachineInstance (gameStateMachine' game) (typedGameValidator game)
data FirstParams = FirstParams
{ fpSecond :: !PaymentPubKeyHash
, fpStake :: !Integer
, fpPlayDeadline :: !POSIXTime
, fpRevealDeadline :: !POSIXTime
, fpNonce :: !BuiltinByteString
, fpChoice :: !GameChoice
} deriving (Show, Generic, FromJSON, ToJSON, ToSchema)
turn StateMachine error into Text
mapError' :: Contract w s SMContractError a -> Contract w s Text a
mapError' = mapError $ pack . show
waitUntilTimeHasPassed :: AsContractError e => POSIXTime -> Contract w s e ()
waitUntilTimeHasPassed t = void $ awaitTime t >> waitNSlots 1
firstGame :: forall s. FirstParams -> Contract (Last ThreadToken) s Text ()
firstGame fp = do
pkh <- Contract.ownPaymentPubKeyHash
DIFF : state token via StateMachine
let game = Game
{ gFirst = pkh
, gSecond = fpSecond fp
, gStake = fpStake fp
, gPlayDeadline = fpPlayDeadline fp
, gRevealDeadline = fpRevealDeadline fp
, gToken = tt
}
client = gameClient game
v = lovelaceValueOf (fpStake fp)
c = fpChoice fp
bs = sha2_256 $ fpNonce fp `appendByteString` if c == Zero then bsZero else bsOne
DIFF : mints state token / NFT ; creates ( containing NFT ) at state machine address with given datum and value
void $ mapError' $ runInitialise client (GameDatum bs Nothing) v
logInfo @String $ "made first move: " ++ show (fpChoice fp)
DIFF : uses to tell 2nd player where the game is : identified by
tell $ Last $ Just tt
waitUntilTimeHasPassed $ fpPlayDeadline fp
DIFF v
m <- mapError' $ getOnChainState client
case m of
Nothing -> throwError "game output not found"
Just (o, _) -> case tyTxOutData $ ocsTxOut o of
GameDatum _ Nothing -> do
logInfo @String "second player did not play"
void $ mapError' $ runStep client ClaimFirst
logInfo @String "first player reclaimed stake"
GameDatum _ (Just c') | c' == c -> do
logInfo @String "second player played and lost"
void $ mapError' $ runStep client $ Reveal $ fpNonce fp
logInfo @String "first player revealed and won"
_ -> logInfo @String "second player played and won"
data SecondParams = SecondParams
{ spFirst :: !PaymentPubKeyHash
, spStake :: !Integer
, spPlayDeadline :: !POSIXTime
, spRevealDeadline :: !POSIXTime
, spChoice :: !GameChoice
, spToken :: !ThreadToken
} deriving (Show, Generic, FromJSON, ToJSON)
secondGame :: forall w s. SecondParams -> Contract w s Text ()
secondGame sp = do
pkh <- Contract.ownPaymentPubKeyHash
let game = Game
{ gFirst = spFirst sp
, gSecond = pkh
, gStake = spStake sp
, gPlayDeadline = spPlayDeadline sp
, gRevealDeadline = spRevealDeadline sp
, gToken = spToken sp
}
client = gameClient game
find that represents the game
case m of
Nothing -> logInfo @String "no running game found"
Just (o, _) -> case tyTxOutData $ ocsTxOut o of
GameDatum _ Nothing -> do
logInfo @String "running game found"
void $ mapError' $ runStep client $ Play $ spChoice sp
logInfo @String $ "made second move: " ++ show (spChoice sp)
waitUntilTimeHasPassed $ spRevealDeadline sp
m' <- mapError' $ getOnChainState client
case m' of
Nothing -> logInfo @String "first player won"
Just _ -> do
logInfo @String "first player didn't reveal"
void $ mapError' $ runStep client ClaimSecond
logInfo @String "second player won"
_ -> throwError "unexpected datum"
type GameSchema = Endpoint "first" FirstParams .\/ Endpoint "second" SecondParams
endpoints :: Contract (Last ThreadToken) GameSchema Text ()
endpoints = awaitPromise (first `select` second) >> endpoints
where
first = endpoint @"first" firstGame
second = endpoint @"second" secondGame
| null | https://raw.githubusercontent.com/haroldcarr/plutus-pioneer-program-3/b66f285d4bb5457ced740c50adb490b50aa0ed74/Lecture-07/StateMachine.hs | haskell | # LANGUAGE DataKinds #
# LANGUAGE DeriveAnyClass #
# LANGUAGE DeriveGeneric #
# LANGUAGE FlexibleContexts #
# LANGUAGE ScopedTypeVariables #
# LANGUAGE TemplateHaskell #
# LANGUAGE TypeOperators #
----------------------------------------------------------------------------
on-chain
Transition function of state machine, somewhat corresponds to previous validator.
from StateMachine Nothing if illegal transition
v Just with constraints and new state
v
2 1
See definition and use of 'check' below.
any other transitions are not legal
checkNonce
for off-chain
----------------------------------------------------------------------------
off-chain | # LANGUAGE MultiParamTypeClasses #
# LANGUAGE NoImplicitPrelude #
# LANGUAGE OverloadedStrings #
# LANGUAGE TypeApplications #
# LANGUAGE TypeFamilies #
module Week07.StateMachine
( Game (..)
, GameChoice (..)
, FirstParams (..)
, SecondParams (..)
, GameSchema
, Last (..)
, ThreadToken
, Text
, endpoints
) where
import Control.Monad hiding (fmap)
import Data.Aeson (FromJSON, ToJSON)
import Data.Monoid (Last (..))
import Data.Text (Text, pack)
import GHC.Generics (Generic)
import Ledger hiding (singleton)
import Ledger.Ada as Ada
import Ledger.Constraints as Constraints
import Ledger.Typed.Tx
import qualified Ledger.Typed.Scripts as Scripts
import Plutus.Contract as Contract
import Plutus.Contract.StateMachine
import qualified PlutusTx
import PlutusTx.Prelude hiding (Semigroup(..), check, unless)
import Playground.Contract (ToSchema)
import Prelude (Semigroup (..), Show (..), String)
import qualified Prelude
data Game = Game
{ gFirst :: !PaymentPubKeyHash
, gSecond :: !PaymentPubKeyHash
, gStake :: !Integer
, gPlayDeadline :: !POSIXTime
, gRevealDeadline :: !POSIXTime
DIFF : use StateMachine 's method
} deriving (Show, Generic, FromJSON, ToJSON, Prelude.Eq)
PlutusTx.makeLift ''Game
data GameChoice = Zero | One
deriving (Show, Generic, FromJSON, ToJSON, ToSchema, Prelude.Eq, Prelude.Ord)
instance Eq GameChoice where
# INLINABLE (= =) #
Zero == Zero = True
One == One = True
_ == _ = False
PlutusTx.unstableMakeIsData ''GameChoice
data GameDatum
= GameDatum BuiltinByteString (Maybe GameChoice)
DIFF : new constructor
deriving Show
instance Eq GameDatum where
# INLINABLE (= =) #
GameDatum bs mc == GameDatum bs' mc' = (bs == bs') && (mc == mc')
Finished == Finished = True
_ == _ = False
PlutusTx.unstableMakeIsData ''GameDatum
data GameRedeemer = Play GameChoice | Reveal BuiltinByteString | ClaimFirst | ClaimSecond
deriving Show
PlutusTx.unstableMakeIsData ''GameRedeemer
# INLINABLE lovelaces #
lovelaces :: Value -> Integer
lovelaces = Ada.getLovelace . Ada.fromValue
# INLINABLE gameDatum #
gameDatum :: TxOut -> (DatumHash -> Maybe Datum) -> Maybe GameDatum
gameDatum o f = do
dh <- txOutDatum o
Datum d <- f dh
PlutusTx.fromBuiltinData d
# INLINABLE transition #
Core business logic .
transition :: Game -> State GameDatum -> GameRedeemer -> Maybe (TxConstraints Void Void, State GameDatum)
Datum
transition game s r = case (stateValue s, stateData s, r) of
DIFF : no check for state token because StateMachine takes care of that behind the scenes
1st player has previusly moved , the 2nd player is MOVING , chosing ' c '
(v, GameDatum bs Nothing, Play c)
| lovelaces v == gStake game -> Just ( Constraints.mustBeSignedBy (gSecond game) <>
5
Constraints.mustValidateIn (to $ gPlayDeadline game)
4 3
, State (GameDatum bs $ Just c) (lovelaceValueOf $ 2 * gStake game)
)
both players have previously moved , 1st player wins , so reveals to collect stake
DIFF : nothing corresponding to ' checkNonce ' here because it can not be expressed as a constraint .
DIFF : no ' nftToFirst ' : in StateMachine the state token auto minted when machine started , auto burned when finished
(v, GameDatum _ (Just _), Reveal _)
| lovelaces v == (2 * gStake game) -> Just ( Constraints.mustBeSignedBy (gFirst game) <>
Constraints.mustValidateIn (to $ gRevealDeadline game)
, State Finished mempty
)
2nd player has not previously moved , 1st player sees that deadline past , so reclaims their stake
(v, GameDatum _ Nothing, ClaimFirst)
| lovelaces v == gStake game -> Just ( Constraints.mustBeSignedBy (gFirst game) <>
Constraints.mustValidateIn (from $ 1 + gPlayDeadline game)
, State Finished mempty
)
both players have previously moved , the 1st player sees they have lost so remains quiet ,
the 2nd player sees reveal deadline has past , so claims game stake ( wins ) .
(v, GameDatum _ (Just _), ClaimSecond)
| lovelaces v == (2 * gStake game) -> Just ( Constraints.mustBeSignedBy (gSecond game) <>
Constraints.mustValidateIn (from $ 1 + gRevealDeadline game)
, State Finished mempty
)
_ -> Nothing
# INLINABLE final #
final :: GameDatum -> Bool
final Finished = True
final _ = False
# INLINABLE check #
check :: BuiltinByteString -> BuiltinByteString -> GameDatum -> GameRedeemer -> ScriptContext -> Bool
check bsZero' bsOne' (GameDatum bs (Just c)) (Reveal nonce) _ =
sha2_256 (nonce `appendByteString` if c == Zero then bsZero' else bsOne') == bs
check _ _ _ _ _ = True
# INLINABLE gameStateMachine #
gameStateMachine :: Game -> BuiltinByteString -> BuiltinByteString -> StateMachine GameDatum GameRedeemer
gameStateMachine game bsZero' bsOne' = StateMachine
{ smTransition = transition game
, smFinal = final
, smCheck = check bsZero' bsOne'
, smThreadToken = Just $ gToken game
}
# INLINABLE mkGameValidator #
mkGameValidator :: Game -> BuiltinByteString -> BuiltinByteString -> GameDatum -> GameRedeemer -> ScriptContext -> Bool
mkGameValidator game bsZero' bsOne' = mkValidator $ gameStateMachine game bsZero' bsOne'
type Gaming = StateMachine GameDatum GameRedeemer
bsZero, bsOne :: BuiltinByteString
bsZero = "0"
bsOne = "1"
gameStateMachine' :: Game -> StateMachine GameDatum GameRedeemer
gameStateMachine' game = gameStateMachine game bsZero bsOne
typedGameValidator :: Game -> Scripts.TypedValidator Gaming
typedGameValidator game = Scripts.mkTypedValidator @Gaming
($$(PlutusTx.compile [|| mkGameValidator ||])
`PlutusTx.applyCode` PlutusTx.liftCode game
`PlutusTx.applyCode` PlutusTx.liftCode bsZero
`PlutusTx.applyCode` PlutusTx.liftCode bsOne)
$$(PlutusTx.compile [|| wrap ||])
where
wrap = Scripts.wrapValidator @GameDatum @GameRedeemer
gameValidator :: Game -> Validator
gameValidator = Scripts.validatorScript . typedGameValidator
gameAddress :: Game -> Ledger.Address
gameAddress = scriptAddress . gameValidator
gameClient :: Game -> StateMachineClient GameDatum GameRedeemer
gameClient game = mkStateMachineClient $ StateMachineInstance (gameStateMachine' game) (typedGameValidator game)
data FirstParams = FirstParams
{ fpSecond :: !PaymentPubKeyHash
, fpStake :: !Integer
, fpPlayDeadline :: !POSIXTime
, fpRevealDeadline :: !POSIXTime
, fpNonce :: !BuiltinByteString
, fpChoice :: !GameChoice
} deriving (Show, Generic, FromJSON, ToJSON, ToSchema)
turn StateMachine error into Text
mapError' :: Contract w s SMContractError a -> Contract w s Text a
mapError' = mapError $ pack . show
waitUntilTimeHasPassed :: AsContractError e => POSIXTime -> Contract w s e ()
waitUntilTimeHasPassed t = void $ awaitTime t >> waitNSlots 1
firstGame :: forall s. FirstParams -> Contract (Last ThreadToken) s Text ()
firstGame fp = do
pkh <- Contract.ownPaymentPubKeyHash
DIFF : state token via StateMachine
let game = Game
{ gFirst = pkh
, gSecond = fpSecond fp
, gStake = fpStake fp
, gPlayDeadline = fpPlayDeadline fp
, gRevealDeadline = fpRevealDeadline fp
, gToken = tt
}
client = gameClient game
v = lovelaceValueOf (fpStake fp)
c = fpChoice fp
bs = sha2_256 $ fpNonce fp `appendByteString` if c == Zero then bsZero else bsOne
DIFF : mints state token / NFT ; creates ( containing NFT ) at state machine address with given datum and value
void $ mapError' $ runInitialise client (GameDatum bs Nothing) v
logInfo @String $ "made first move: " ++ show (fpChoice fp)
DIFF : uses to tell 2nd player where the game is : identified by
tell $ Last $ Just tt
waitUntilTimeHasPassed $ fpPlayDeadline fp
DIFF v
m <- mapError' $ getOnChainState client
case m of
Nothing -> throwError "game output not found"
Just (o, _) -> case tyTxOutData $ ocsTxOut o of
GameDatum _ Nothing -> do
logInfo @String "second player did not play"
void $ mapError' $ runStep client ClaimFirst
logInfo @String "first player reclaimed stake"
GameDatum _ (Just c') | c' == c -> do
logInfo @String "second player played and lost"
void $ mapError' $ runStep client $ Reveal $ fpNonce fp
logInfo @String "first player revealed and won"
_ -> logInfo @String "second player played and won"
data SecondParams = SecondParams
{ spFirst :: !PaymentPubKeyHash
, spStake :: !Integer
, spPlayDeadline :: !POSIXTime
, spRevealDeadline :: !POSIXTime
, spChoice :: !GameChoice
, spToken :: !ThreadToken
} deriving (Show, Generic, FromJSON, ToJSON)
secondGame :: forall w s. SecondParams -> Contract w s Text ()
secondGame sp = do
pkh <- Contract.ownPaymentPubKeyHash
let game = Game
{ gFirst = spFirst sp
, gSecond = pkh
, gStake = spStake sp
, gPlayDeadline = spPlayDeadline sp
, gRevealDeadline = spRevealDeadline sp
, gToken = spToken sp
}
client = gameClient game
find that represents the game
case m of
Nothing -> logInfo @String "no running game found"
Just (o, _) -> case tyTxOutData $ ocsTxOut o of
GameDatum _ Nothing -> do
logInfo @String "running game found"
void $ mapError' $ runStep client $ Play $ spChoice sp
logInfo @String $ "made second move: " ++ show (spChoice sp)
waitUntilTimeHasPassed $ spRevealDeadline sp
m' <- mapError' $ getOnChainState client
case m' of
Nothing -> logInfo @String "first player won"
Just _ -> do
logInfo @String "first player didn't reveal"
void $ mapError' $ runStep client ClaimSecond
logInfo @String "second player won"
_ -> throwError "unexpected datum"
type GameSchema = Endpoint "first" FirstParams .\/ Endpoint "second" SecondParams
endpoints :: Contract (Last ThreadToken) GameSchema Text ()
endpoints = awaitPromise (first `select` second) >> endpoints
where
first = endpoint @"first" firstGame
second = endpoint @"second" secondGame
|
72a4cfad1fa10790830909b389a22b4bd69711296e435b9755fa754c223f94e5 | joneshf/open-source | Exit.hs | -- |
-- Module: Exit
-- Description: An effect for 'Exit'ing the program.
--
Copyright : ( c ) , 2018
-- License: BSD3
-- Maintainer:
-- Stability: experimental
module Exit
( Exit(..)
, Exiter
, die
, failure
, io
) where
import "freer-simple" Control.Monad.Freer (Eff, Member)
import "text" Data.Text (Text)
import qualified "freer-simple" Control.Monad.Freer
import qualified "text" Data.Text
import qualified "base" System.Exit
-- |
-- How to exit the program.
data Exit a where
Die :: Text -> Exit a
Failure :: Int -> Exit a
-- |
-- Synonym to clean up type signatures
type Exiter f = forall a. Exit a -> f a
-- |
-- Interpret the exit effect to 'IO'.
io :: Exiter IO
io = \case
Die msg -> System.Exit.die (Data.Text.unpack msg)
Failure code -> System.Exit.exitWith (System.Exit.ExitFailure code)
-- |
-- Helper to make exiting with a 'Die' easier.
die :: (Member Exit e) => Text -> Eff e a
die = Control.Monad.Freer.send . Die
-- |
-- Helper to make exiting with a 'Failure' easier.
failure :: (Member Exit e) => Eff e a
failure = Control.Monad.Freer.send (Failure 1)
| null | https://raw.githubusercontent.com/joneshf/open-source/e3412fc68c654d89a8d3af4e12ac19c70e3055ec/packages/dhall-javascript/haskell/src/Exit.hs | haskell | |
Module: Exit
Description: An effect for 'Exit'ing the program.
License: BSD3
Maintainer:
Stability: experimental
|
How to exit the program.
|
Synonym to clean up type signatures
|
Interpret the exit effect to 'IO'.
|
Helper to make exiting with a 'Die' easier.
|
Helper to make exiting with a 'Failure' easier. | Copyright : ( c ) , 2018
module Exit
( Exit(..)
, Exiter
, die
, failure
, io
) where
import "freer-simple" Control.Monad.Freer (Eff, Member)
import "text" Data.Text (Text)
import qualified "freer-simple" Control.Monad.Freer
import qualified "text" Data.Text
import qualified "base" System.Exit
data Exit a where
Die :: Text -> Exit a
Failure :: Int -> Exit a
type Exiter f = forall a. Exit a -> f a
io :: Exiter IO
io = \case
Die msg -> System.Exit.die (Data.Text.unpack msg)
Failure code -> System.Exit.exitWith (System.Exit.ExitFailure code)
die :: (Member Exit e) => Text -> Eff e a
die = Control.Monad.Freer.send . Die
failure :: (Member Exit e) => Eff e a
failure = Control.Monad.Freer.send (Failure 1)
|
da065e772f1ef5751927d87f4114fa92a572e706055ef1bcc4055e15ab638a94 | GlideAngle/flare-timing | EdgeToEdge.hs | {-# OPTIONS_GHC -fplugin Data.UnitsOfMeasure.Plugin #-}
module EdgeToEdge (edgeToEdgeUnits, toLatLngDbl) where
import Prelude hiding (span)
import Data.Ratio((%))
import qualified Data.Number.FixedFunctions as F
import Data.List (inits)
import Test.Tasty (TestTree, TestName, testGroup)
import Test.Tasty.HUnit as HU ((@?=), (@?), testCase)
import Data.UnitsOfMeasure ((/:), (-:), u, convert)
import Data.UnitsOfMeasure.Internal (Quantity(..))
import Data.Bifunctor.Flip (Flip(..))
import Flight.Units ()
import Flight.LatLng (Lat(..), Lng(..), LatLng(..))
import Flight.LatLng.Rational (Epsilon(..), defEps)
import Flight.Distance (TaskDistance(..), PathDistance(..), SpanLatLng, fromKms)
import Flight.Zone (QBearing, Bearing(..), Radius(..), Zone(..))
import Flight.Zone.Cylinder
(Samples(..), SampleParams(..), Tolerance(..), CircumSample, ZonePoint(..))
import Flight.Zone.Path (costSegment, distancePointToPoint)
import qualified Flight.Earth.Sphere.PointToPoint.Rational as Rat (distanceHaversine)
import qualified Flight.Earth.Sphere.Cylinder.Rational as Rat (circumSample)
import qualified Flight.Task as FS (distanceEdgeToEdge)
import Flight.Task (Zs(..), AngleCut(..), CostSegment)
import Flight.Earth.Sphere.Separated (separatedZones)
import Data.Ratio.Rounding (dpRound)
(.>=.) :: (Show a, Show b) => a -> b -> String
(.>=.) x y = show x ++ " >= " ++ show y
(.<=.) :: (Show a, Show b) => a -> b -> String
(.<=.) x y = show x ++ " <= " ++ show y
(.~=.) :: (Show a, Show b) => a -> b -> String
(.~=.) x y = show x ++ " ~= " ++ show y
edgeToEdgeUnits :: TestTree
edgeToEdgeUnits = testGroup "Zone edge shortest path unit tests"
[ circumSampleUnits
, forbesUnits
]
m100 :: Tolerance Rational
m100 = Tolerance $ 100 % 1
mm100 :: Tolerance Rational
mm100 = Tolerance $ 100 % 1000
mm30 :: Tolerance Rational
mm30 = Tolerance $ 30 % 1000
mm10 :: Tolerance Rational
mm10 = Tolerance $ 10 % 1000
mm1 :: Tolerance Rational
mm1 = Tolerance $ 1 % 1000
sampleParams :: SampleParams Rational
sampleParams = SampleParams { spSamples = Samples 100
, spTolerance = mm30
}
ll :: LatLng Rational [u| rad |]
ll =
LatLng (lat, lng)
where
oneRadian = [u| 1 rad |]
lat = Lat oneRadian
lng = Lng oneRadian
br :: QBearing Rational [u| rad |]
br = let (Epsilon e) = defEps in (Bearing . MkQuantity $ F.pi e)
circumSampleUnits :: TestTree
circumSampleUnits = testGroup "Points just within the zone"
[ testGroup "Outside the zone."
[ HU.testCase
"No points > 0mm outside a 40m cylinder when searching within 1mm" $
zpFilter (>) ll [u| 40 m |]
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 40 m |]) ll))
@?= []
, HU.testCase
"No points > 0mm outside a 400m cylinder when searching within 1mm" $
zpFilter (>) ll [u| 400 m |]
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 400 m |]) ll))
@?= []
, HU.testCase
"No points > 0mm outside a 1km cylinder when searching within 10mm" $
zpFilter (>) ll (convert [u| 1 km |])
(fst $ cs (sampleParams { spTolerance = mm10 }) br Nothing (Cylinder (Radius $ convert [u| 1 km |]) ll))
@?= []
, HU.testCase
"No points > 0mm outside a 10km cylinder when searching within 100mm" $
zpFilter (>) ll (convert [u| 10 km |])
(fst $ cs (sampleParams { spTolerance = mm100 }) br Nothing (Cylinder (Radius $ convert [u| 10 km |]) ll))
@?= []
, HU.testCase
"No points > 0m outside a 100km cylinder when searching within 100m" $
zpFilter (>) ll (convert [u| 100 km |])
(fst $ cs (sampleParams { spTolerance = m100 }) br Nothing (Cylinder (Radius $ convert [u| 100 km |]) ll))
@?= []
]
, testGroup "Inside the zone."
[ HU.testCase
"No points > 1mm inside a 40m cylinder when searching within 1mm" $
zpFilter (<) ll ([u| 40 m |] -: convert [u| 1 mm |])
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 40 m |]) ll))
@?= []
, HU.testCase
"No points > 1mm inside a 400m cylinder when searching within 1mm" $
zpFilter (<) ll ([u| 400 m |] -: convert [u| 1 mm |])
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 400 m |]) ll))
@?= []
, HU.testCase
"No points > 9mm inside a 1km cylinder when searching within 10mm" $
zpFilter (<) ll (convert [u| 1 km |] -: convert [u| 9 mm |])
(fst $ cs (sampleParams { spTolerance = mm10 }) br Nothing (Cylinder (Radius $ convert [u| 1 km |]) ll))
@?= []
, HU.testCase
"No points > 97mm inside a 10km cylinder when searching within 100mm" $
zpFilter (<) ll (convert [u| 10 km |] -: convert [u| 97 mm |])
(fst $ cs (sampleParams { spTolerance = mm100 }) br Nothing (Cylinder (Radius $ convert [u| 10 km |]) ll))
@?= []
, HU.testCase
"No points > 85m inside a 100km cylinder when searching within 100m" $
zpFilter (<) ll (convert [u| 100 km |] -: [u| 85 m |])
(fst $ cs (sampleParams { spTolerance = m100 }) br Nothing (Cylinder (Radius $ convert [u| 100 km |] ) ll))
@?= []
]
]
zpFilter
:: (Quantity Rational [u| m |] -> Quantity Rational [u| m |] -> Bool)
-> LatLng Rational [u| rad |]
-> Quantity Rational [u| m |]
-> [ZonePoint Rational]
-> [ZonePoint Rational]
zpFilter cmp origin d =
filter (\x -> zpDistance origin x `cmp` d)
zpDistance
:: LatLng Rational [u| rad |]
-> ZonePoint Rational
-> Quantity Rational [u| m |]
zpDistance origin ZonePoint{point} =
d
where
TaskDistance d =
edgesSum $ distancePointToPoint span [Point origin, Point point]
-- | The input pair is in degrees while the output is in radians.
toLatLngDbl :: (Double, Double) -> LatLng Double [u| rad |]
toLatLngDbl (lat, lng) =
LatLng (Lat lat'', Lng lng'')
where
lat' = MkQuantity lat :: Quantity Double [u| deg |]
lng' = MkQuantity lng :: Quantity Double [u| deg |]
lat'' = convert lat' :: Quantity Double [u| rad |]
lng'' = convert lng' :: Quantity Double [u| rad |]
-- | The input pair is in degrees while the output is in radians.
toLL :: (Double, Double) -> LatLng Rational [u| rad |]
toLL (lat, lng) =
LatLng (Lat lat'', Lng lng'')
where
lat' = (MkQuantity $ toRational lat) :: Quantity Rational [u| deg |]
lng' = (MkQuantity $ toRational lng) :: Quantity Rational [u| deg |]
lat'' = convert lat' :: Quantity Rational [u| rad |]
lng'' = convert lng' :: Quantity Rational [u| rad |]
forbesUnits :: TestTree
forbesUnits = testGroup "Forbes 2011/2012 distances"
[ day1PartUnits
, day1Units
, day2PartUnits
, day2Units
, day3PartUnits
, day3Units
, day4PartUnits
, day4Units
, day5PartUnits
, day5Units
, day6PartUnits
, day6Units
, day7PartUnits
, day7Units
, day8PartUnits
, day8Units
]
mkPartDayUnits :: TestName
-> [Zone Rational]
-> TaskDistance Rational
-> TestTree
mkPartDayUnits title zs (TaskDistance d) = testGroup title
[ HU.testCase
("point-to-point distance " ++ show td' ++ " ~= " ++ show tdR)
$ (tdR' == tdR) @? tdR' .~=. tdR
]
where
dKm = convert d :: Quantity Rational [u| km |]
Flip r = dpRound 3 <$> Flip dKm
tdR = TaskDistance (convert r :: Quantity Rational [u| m |])
td'@(TaskDistance d') = edgesSum $ distancePointToPoint span zs
dKm' = convert d' :: Quantity Rational [u| km |]
Flip r' = dpRound 3 <$> Flip dKm'
tdR' = TaskDistance (convert r' :: Quantity Rational [u| m |])
day1PartUnits :: TestTree
day1PartUnits = testGroup "Task 1 [...]"
[ mkPartDayUnits "Task 1 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 1 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 1 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 33.85373, 147.94195)
, (negate 33.4397, 148.34533)
, (negate 33.61965, 148.4099)
]
p1 = take 2 xs
d1 = fromKms [u| 54.755578 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 59.276627 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 20.88547 km |]
day2PartUnits :: TestTree
day2PartUnits = testGroup "Task 2 [...]"
[ mkPartDayUnits "Task 2 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 2 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 2 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.90223, 147.98492)
, (negate 32.9536, 147.55457)
, (negate 33.12592, 147.91043)
]
p1 = take 2 xs
d1 = fromKms [u| 51.290669 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 40.569544 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 38.30752 km |]
day3PartUnits :: TestTree
day3PartUnits = testGroup "Task 3 [...]"
[ mkPartDayUnits "Task 3 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 3 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 3 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 34.02107, 148.2233)
, (negate 34.11795, 148.5013)
, (negate 34.82197, 148.66543)
]
p1 = take 2 xs
d1 = fromKms [u| 78.147093 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 27.780099 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 79.716223 km |]
day4PartUnits :: TestTree
day4PartUnits = testGroup "Task 4 [...]"
[ mkPartDayUnits "Task 4 [x, x, _]" p1' d1
, mkPartDayUnits "Task 4 [_, x, x]" p2 d2
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.90223, 147.98492)
, (negate 32.46363, 148.989)
]
-- NOTE: Use p1' to avoid an hlint duplication warning.
p1' = take 2 xs
d1 = fromKms [u| 51.290669 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 105.87255 km |]
day5PartUnits :: TestTree
day5PartUnits = testGroup "Task 5 [...]"
[ mkPartDayUnits "Task 5 [x, x, _]" p1 d1
, mkPartDayUnits "Task 5 [_, x, x]" p2 d2
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.56608, 148.22657)
, (negate 32.0164, 149.43363)
]
p1 = take 2 xs
d1 = fromKms [u| 92.601904 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 128.87562 km |]
day6PartUnits :: TestTree
day6PartUnits = testGroup "Task 6 [...]"
[ mkPartDayUnits "Task 6 [x, x, _]" p1 d1
, mkPartDayUnits "Task 6 [_, x, x]" p2 d2
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.19498, 147.76218)
, (negate 31.69323, 148.29623)
]
p1 = take 2 xs
d1 = fromKms [u| 130.665489 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 75.17947 km |]
day7PartUnits :: TestTree
day7PartUnits = testGroup "Task 7 [...]"
[ mkPartDayUnits "Task 7 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 7 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 7 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.9536, 147.55457)
, (negate 32.76052, 148.64958)
, (negate 32.93585, 148.74947)
]
p1 = take 2 xs
d1 = fromKms [u| 57.365312 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 104.509732 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 21.613886 km |]
-- | Leg distances for the parts of Task 8, compared against expected
-- point-to-point distances on the FAI sphere.
day8PartUnits :: TestTree
day8PartUnits = testGroup "Task 8 [...]"
    [ mkPartDayUnits "Task 8 [x, x, _, _]" leg1 d1
    , mkPartDayUnits "Task 8 [_, x, x, _]" leg2 d2
    , mkPartDayUnits "Task 8 [_, _, x, x]" leg3 d3
    ]
    where
        mark = Point . toLL

        tp1 = mark (negate 33.36137, 147.93207)
        tp2 = mark (negate 33.75343, 147.52865)
        tp3 = mark (negate 33.12908, 147.57323)
        tp4 = mark (negate 33.361, 147.9315)

        -- Leg over turnpoints 1-2.
        leg1 = [tp1, tp2]
        d1 = fromKms [u| 57.427511 km |]

        -- Leg over turnpoints 2-3.
        leg2 = [tp2, tp3]
        d2 = fromKms [u| 69.547668 km |]

        -- Leg over turnpoints 3-4.
        leg3 = [tp3, tp4]
        d3 = fromKms [u| 42.131961 km |]
-- | Builds the test group for one competition day. Checks that the zones
-- are separated, that the point-to-point distance bounds the expected task
-- distance from above, that the edge-to-edge distance bounds it from below,
-- and the same bounds over every prefix of the task's zones.
mkDayUnits :: TestName
           -> [Zone Rational]
           -- ^ The zones of the day's task.
           -> TaskDistance Rational
           -- ^ The expected distance of the whole task.
           -> [TaskDistance Rational]
           -- ^ The expected cumulative distances to each zone.
           -> TestTree
mkDayUnits title pDay dDay' dsDay' = testGroup title
    [ HU.testCase "zones are separated" $ separatedZones span pDay @?= True
    , HU.testCase
        ("point-to-point distance >= " ++ show dDay)
        $ (ppDay >= dDay) @? ppDay .>=. dDay
    , HU.testCase
        ("edge-to-edge distance <= " ++ show dDay)
        $ (eeDay <= dDay) @? eeDay .<=. dDay
    , HU.testCase
        ("point-to-point distances "
        ++ show ppDayInits
        ++ " >= "
        ++ show dsDay
        ) $
        -- NOTE: (>=) on lists is the lexicographic ordering, not an
        -- elementwise comparison.
        (ppDayInits >= dsDay) @? ppDayInits .>=. dsDay
    , HU.testCase
        ("edge-to-edge distances "
        ++ show eeDayInits
        ++ " <= "
        ++ show dsDay
        ) $
        distLess eeDayInits dsDay @? eeDayInits .<=. dsDay
    ]
    where
        -- Expected distances, rounded the same way as the computed ones.
        dDay = tdRound dDay'
        dsDay = tdRound <$> dsDay'

        -- Point-to-point path distance over a list of zones.
        pp :: [Zone Rational] -> PathDistance Rational
        pp = distancePointToPoint span

        -- Edge-to-edge path distance, falling back to a zero distance and
        -- an empty path when the solver returns no solution.
        ee :: [Zone Rational] -> PathDistance Rational
        ee xs =
            case distanceEdgeToEdge' xs of
                Zs x -> x
                _ -> PathDistance (TaskDistance [u| 0 m |]) []

        ppDay :: TaskDistance Rational
        ppDay = tdRound . edgesSum $ pp pDay

        eeDay :: TaskDistance Rational
        eeDay = tdRound . edgesSum $ ee pDay

        -- Non-empty prefixes of the day's zones (drop the leading []).
        pDayInits :: [[Zone Rational]]
        pDayInits = drop 1 $ inits pDay

        ppDayInits :: [TaskDistance Rational]
        ppDayInits = tdRound . edgesSum . pp <$> pDayInits

        eeDayInits :: [TaskDistance Rational]
        eeDayInits = tdRound . edgesSum . ee <$> pDayInits

        -- Compares only the last element of each list, the full distances.
        distLess :: Ord a => [a] -> [a] -> Bool
        distLess xs ys = take 1 (reverse xs) <= take 1 (reverse ys)

        -- Rounds the underlying quantity to 2 decimal places.
        tdRound :: TaskDistance Rational -> TaskDistance Rational
        tdRound (TaskDistance (MkQuantity d)) =
            TaskDistance . MkQuantity . dpRound 2 $ d
-- Per-day test groups, pairing each day's zones with the expected task
-- distance and the expected cumulative distances to each zone.
day1Units :: TestTree
day1Units = mkDayUnits "Task 1" pDay1 dDay1 dsDay1

day2Units :: TestTree
day2Units = mkDayUnits "Task 2" pDay2 dDay2 dsDay2

day3Units :: TestTree
day3Units = mkDayUnits "Task 3" pDay3 dDay3 dsDay3

day4Units :: TestTree
day4Units = mkDayUnits "Task 4" pDay4 dDay4 dsDay4

day5Units :: TestTree
day5Units = mkDayUnits "Task 5" pDay5 dDay5 dsDay5

day6Units :: TestTree
day6Units = mkDayUnits "Task 6" pDay6 dDay6 dsDay6

day7Units :: TestTree
day7Units = mkDayUnits "Task 7" pDay7 dDay7 dsDay7

day8Units :: TestTree
day8Units = mkDayUnits "Task 8" pDay8 dDay8 dsDay8
NOTE : The task distances show below are taken from the competition * .fsdb file
at the path /Fs / FsCompetition / FsTasks / FsTask / FsTaskScoreParams / FsTaskDistToTp .
The first distance is not 9.9 kms , 10 kms - 100 m.
Some flight instruments use WGS84 and others use the FAI spheriod . To
accomodate this , there is a tolerance of either 0.01 % or 0.5 % used , depending
on the competition . For category 1 events since 2015 - 01 - 01 it is 0.01 % .
Category 2 events can elect to use the wider margin . This tolerance is used for
working out if tracks reach control zones .
The optimised route is worked out by FS in 2D space from a UTM projection . This
accounts for the discrepency with errors coming from choosing wrong waypoints
for the optimal route and from the conversion of these points back to the FAI
sphere .
TODO : Find out why the first distance is 9.882 and not 9.9 km .
< FsTaskDistToTp tp_no="1 " distance="0 " / >
< FsTaskDistToTp tp_no="2 " distance="9.882 " / >
< FsTaskDistToTp tp_no="3 " distance="54.254 " / >
< FsTaskDistToTp tp_no="4 " distance="112.779 " / >
< FsTaskDistToTp tp_no="5 " distance="133.357 " / >
The unit tests here are not comparing to distances of FS but to point to point
distances worked out on the FAI sphere .
SEE :
SEE : -type.co.uk/scripts/latlong-vincenty.html
SEE :
SEE : /
-33.36137 , 147.93207 , -33.85373 , 147.94195 , -33.4397 , 148.34533 , -33.61965 , 148.4099
-33.36137 , 147.93207 , -33.85373 , 147.94195
-33.85373 , 147.94195 , -33.4397 , 148.34533
-33.4397 , 148.34533 , -33.61965 , 148.4099
NOTE: Point to point distances using the Haversine method.
= >
54.76
59.28
20.89
54.76 + 59.28 + 20.89
= > 134.93
134.93 - 10 - 0.4
= > 124.53
NOTE : Point to point distances using Vincenty method .
= >
54.62
59.24
20.84
54.62 + 59.24 + 20.84
= > 134.7
134.7 - 10 - 0.4
= > 124.30
- sphericalPointToPoint :
distance : 134.917675
legs :
- 54.755578
- 59.276627
- 20.88547
legsSum :
- 54.755578
- 114.032205
- 134.917675
waypoints :
- lat : -33.36137
: 147.93207
- lat : -33.85372998
lng : 147.94194999
- lat : -33.4397
lng : 148.34532999
- lat : -33.61965
: 148.40989999
NOTE: The task distances show below are taken from the competition *.fsdb file
at the path /Fs/FsCompetition/FsTasks/FsTask/FsTaskScoreParams/FsTaskDistToTp.
The expected first distance is 9.9 km, that is 10 km - 100 m.
Some flight instruments use WGS84 and others use the FAI spheriod. To
accomodate this, there is a tolerance of either 0.01% or 0.5% used, depending
on the competition. For category 1 events since 2015-01-01 it is 0.01%.
Category 2 events can elect to use the wider margin. This tolerance is used for
working out if tracks reach control zones.
The optimised route is worked out by FS in 2D space from a UTM projection. This
accounts for the discrepency with errors coming from choosing wrong waypoints
for the optimal route and from the conversion of these points back to the FAI
sphere.
TODO: Find out why the first distance is 9.882 and not 9.9 km.
<FsTaskDistToTp tp_no="1" distance="0" />
<FsTaskDistToTp tp_no="2" distance="9.882" />
<FsTaskDistToTp tp_no="3" distance="54.254" />
<FsTaskDistToTp tp_no="4" distance="112.779" />
<FsTaskDistToTp tp_no="5" distance="133.357" />
The unit tests here are not comparing to distances of FS but to point to point
distances worked out on the FAI sphere.
SEE:
SEE: -type.co.uk/scripts/latlong-vincenty.html
SEE:
SEE: /
-33.36137, 147.93207, -33.85373, 147.94195, -33.4397, 148.34533, -33.61965, 148.4099
-33.36137, 147.93207, -33.85373, 147.94195
-33.85373, 147.94195, -33.4397, 148.34533
-33.4397, 148.34533, -33.61965, 148.4099
NOTE: Point to point distances using Haversine method.
=>
54.76
59.28
20.89
54.76 + 59.28 + 20.89
=> 134.93
134.93 - 10 - 0.4
=> 124.53
NOTE: Point to point distances using Vincenty method.
=>
54.62
59.24
20.84
54.62 + 59.24 + 20.84
=> 134.7
134.7 - 10 - 0.4
=> 124.30
- sphericalPointToPoint:
distance: 134.917675
legs:
- 54.755578
- 59.276627
- 20.88547
legsSum:
- 54.755578
- 114.032205
- 134.917675
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -33.85372998
lng: 147.94194999
- lat: -33.4397
lng: 148.34532999
- lat: -33.61965
lng: 148.40989999
-}
-- | Zones of Task 1: a 100 m cylinder and a 10000 m cylinder at the same
-- launch point, then 400 m turnpoint cylinders.
-- NOTE: Applications are parenthesized rather than using @$@, for
-- consistency with how 'pDay2' through 'pDay8' are written.
pDay1 :: [Zone Rational]
pDay1 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 10000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.85373, 147.94195))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.4397, 148.34533))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.61965, 148.4099))
    ]

-- | Expected distance of the whole of Task 1.
dDay1 :: TaskDistance Rational
dDay1 = fromKms [u| 134.917675 km |]

-- | Expected cumulative distances to each zone of Task 1. The 9.9 km entry
-- is presumably the 10 km cylinder less the 100 m cylinder — TODO confirm.
dsDay1 :: [TaskDistance Rational]
dsDay1 =
    fromKms . MkQuantity <$>
    [ 0
    , 9.9
    , 54.755578
    , 114.032205
    , 134.917675
    ]
-33.36137 , 147.93207 , -32.90223 , 147.98492 , -32.9536 , 147.55457 , -33.12592 , 147.91043
-33.36137 , 147.93207 , -32.90223 , 147.98492
-32.90223 , 147.98492 , -32.9536 , 147.55457
-32.9536 , 147.55457 , -33.12592 , 147.91043
NOTE : Point to point distances using method .
= >
51.29
40.57
38.31
51.29 + 40.57 + 38.31
= > 130.17
130.17 - 5 - 0.4
= > 124.77
NOTE : Point to point distances using Vincenty method .
= >
51.16
40.65
38.34
51.16 + 40.65 + 38.34
= > 130.15
130.15 - 5 - 0.4
= > 124.75
- sphericalPointToPoint :
distance : 130.167733
legs :
- 51.290669
- 40.569544
- 38.30752
legsSum :
- 51.290669
- 91.860213
- 130.167733
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.90223
: 147.98491999
- lat : -32.9536
: 147.55457
- lat : -33.12592
lng : 147.91042999
-33.36137, 147.93207, -32.90223, 147.98492, -32.9536, 147.55457, -33.12592, 147.91043
-33.36137, 147.93207, -32.90223, 147.98492
-32.90223, 147.98492, -32.9536, 147.55457
-32.9536, 147.55457, -33.12592, 147.91043
NOTE: Point to point distances using Haversine method.
=>
51.29
40.57
38.31
51.29 + 40.57 + 38.31
=> 130.17
130.17 - 5 - 0.4
=> 124.77
NOTE: Point to point distances using Vincenty method.
=>
51.16
40.65
38.34
51.16 + 40.65 + 38.34
=> 130.15
130.15 - 5 - 0.4
=> 124.75
- sphericalPointToPoint:
distance: 130.167733
legs:
- 51.290669
- 40.569544
- 38.30752
legsSum:
- 51.290669
- 91.860213
- 130.167733
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.90223
lng: 147.98491999
- lat: -32.9536
lng: 147.55457
- lat: -33.12592
lng: 147.91042999
-}
-- | Zones of Task 2: a 100 m cylinder and a 5000 m cylinder at the same
-- launch point, then 400 m turnpoint cylinders.
pDay2 :: [Zone Rational]
pDay2 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 5000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.90223, 147.98492))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.9536, 147.55457))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.12592, 147.91043))
    ]

-- | Expected distance of the whole of Task 2.
dDay2 :: TaskDistance Rational
dDay2 = fromKms [u| 130.167733 km |]

-- | Expected cumulative distances to each zone of Task 2. The 4.9 km entry
-- is presumably the 5 km cylinder less the 100 m cylinder — TODO confirm.
dsDay2 :: [TaskDistance Rational]
dsDay2 =
    fromKms . MkQuantity <$>
    [ 0
    , 4.9
    , 51.290669
    , 91.860213
    , 130.167733
    ]
-33.36137 , 147.93207 , -34.02107 , 148.2233 , -34.11795 , 148.5013 , -34.82197 , 148.66543
-33.36137 , 147.93207 , -34.02107 , 148.2233
-34.02107 , 148.2233 , -34.11795 , 148.5013
-34.11795 , 148.5013 , -34.82197 , 148.66543
NOTE : Point to point distances using method .
= >
78.15
27.78
79.72
78.15 + 27.78 + 79.72
= > 185.65
185.65 - 25 - 0.4
= > 160.25
NOTE : Point to point distances using Vincenty method .
= >
77.99
27.82
79.54
77.99 + 27.82 + 79.54
= > 185.35
185.35 - 25 - 0.4
= > 159.95
- sphericalPointToPoint :
distance : 185.643415
legs :
- 78.147093
- 27.780099
- 79.716223
legsSum :
- 78.147093
- 105.927192
- 185.643415
waypoints :
- lat : -33.36137
: 147.93207
- lat : -34.02107
lng : 148.22329998
- lat : -34.11795
: 148.50129999
- lat : -34.82197
lng : 148.66542999
-33.36137, 147.93207, -34.02107, 148.2233, -34.11795, 148.5013, -34.82197, 148.66543
-33.36137, 147.93207, -34.02107, 148.2233
-34.02107, 148.2233, -34.11795, 148.5013
-34.11795, 148.5013, -34.82197, 148.66543
NOTE: Point to point distances using Haversine method.
=>
78.15
27.78
79.72
78.15 + 27.78 + 79.72
=> 185.65
185.65 - 25 - 0.4
=> 160.25
NOTE: Point to point distances using Vincenty method.
=>
77.99
27.82
79.54
77.99 + 27.82 + 79.54
=> 185.35
185.35 - 25 - 0.4
=> 159.95
- sphericalPointToPoint:
distance: 185.643415
legs:
- 78.147093
- 27.780099
- 79.716223
legsSum:
- 78.147093
- 105.927192
- 185.643415
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -34.02107
lng: 148.22329998
- lat: -34.11795
lng: 148.50129999
- lat: -34.82197
lng: 148.66542999
-}
-- | Zones of Task 3: a 100 m cylinder and a 25000 m cylinder at the same
-- launch point, then 400 m turnpoint cylinders.
pDay3 :: [Zone Rational]
pDay3 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 25000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 34.02107, 148.2233))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 34.11795, 148.5013))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 34.82197, 148.66543))
    ]

-- | Expected distance of the whole of Task 3.
dDay3 :: TaskDistance Rational
dDay3 = fromKms [u| 185.643415 km |]

-- | Expected cumulative distances to each zone of Task 3. The 24.9 km entry
-- is presumably the 25 km cylinder less the 100 m cylinder — TODO confirm.
dsDay3 :: [TaskDistance Rational]
dsDay3 =
    fromKms . MkQuantity <$>
    [ 0
    , 24.9
    , 78.147093
    , 105.927192
    , 185.643415
    ]
-33.36137 , 147.93207 , -32.90223 , 147.98492 , -32.46363 , 148.989
-33.36137 , 147.93207 , -32.90223 , 147.98492
-32.90223 , 147.98492 , -32.46363 , 148.989
NOTE : Point to point distances using method .
= >
51.29
105.9
51.29 + 105.9
= > 157.19
157.19 - 15 - 0.4
= > 141.79
NOTE : Point to point distances using Vincenty method .
= >
51.16
106
51.16 + 106
= > 157.16
157.16 - 15 - 0.4
= > 141.76
- sphericalPointToPoint :
distance : 157.16322
legs :
- 51.290669
- 105.87255
legsSum :
- 51.290669
- 157.16322
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.90223
: 147.98491999
- lat : -32.46363
: 148.989
-33.36137, 147.93207, -32.90223, 147.98492, -32.46363, 148.989
-33.36137, 147.93207, -32.90223, 147.98492
-32.90223, 147.98492, -32.46363, 148.989
NOTE: Point to point distances using Haversine method.
=>
51.29
105.9
51.29 + 105.9
=> 157.19
157.19 - 15 - 0.4
=> 141.79
NOTE: Point to point distances using Vincenty method.
=>
51.16
106
51.16 + 106
=> 157.16
157.16 - 15 - 0.4
=> 141.76
- sphericalPointToPoint:
distance: 157.16322
legs:
- 51.290669
- 105.87255
legsSum:
- 51.290669
- 157.16322
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.90223
lng: 147.98491999
- lat: -32.46363
lng: 148.989
-}
-- | Zones of Task 4: a 100 m cylinder and a 15000 m cylinder at the same
-- launch point, a 25000 m cylinder and a 400 m goal cylinder.
pDay4 :: [Zone Rational]
pDay4 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 15000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 25000) (toLL (negate 32.90223, 147.98492))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.46363, 148.989))
    ]

-- | Expected distance of the whole of Task 4.
dDay4 :: TaskDistance Rational
dDay4 = fromKms [u| 157.16322 km |]

-- | Expected cumulative distances to each zone of Task 4. The 14.9 km entry
-- is presumably the 15 km cylinder less the 100 m cylinder — TODO confirm.
dsDay4 :: [TaskDistance Rational]
dsDay4 =
    fromKms . MkQuantity <$>
    [ 0
    , 14.9
    , 51.290669
    , 157.16322
    ]
-33.36137 , 147.93207 , -32.56608 , 148.22657 , -32.0164 , 149.43363
-33.36137 , 147.93207 , -32.56608 , 148.22657
-32.56608 , 148.22657 , -32.0164 , 149.43363
NOTE : Point to point distances using method .
= >
92.6
128.9
92.6 + 128.9
= > 221.5
221.5 - 15 - 0.4
= > 206.1
NOTE : Point to point distances using Vincenty method .
= >
92.4
129
92.4 + 129
= > 221.4
221.4 - 15 - 0.4
= > 206.0
- sphericalPointToPoint :
distance : 221.477524
legs :
- 92.601904
- 128.87562
legsSum :
- 92.601904
- 221.477524
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.56607998
lng : 148.22657
- lat : -32.01639998
lng : 149.43362998
-33.36137, 147.93207, -32.56608, 148.22657, -32.0164, 149.43363
-33.36137, 147.93207, -32.56608, 148.22657
-32.56608, 148.22657, -32.0164, 149.43363
NOTE: Point to point distances using Haversine method.
=>
92.6
128.9
92.6 + 128.9
=> 221.5
221.5 - 15 - 0.4
=> 206.1
NOTE: Point to point distances using Vincenty method.
=>
92.4
129
92.4 + 129
=> 221.4
221.4 - 15 - 0.4
=> 206.0
- sphericalPointToPoint:
distance: 221.477524
legs:
- 92.601904
- 128.87562
legsSum:
- 92.601904
- 221.477524
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.56607998
lng: 148.22657
- lat: -32.01639998
lng: 149.43362998
-}
-- | Zones of Task 5: a 100 m cylinder and a 15000 m cylinder at the same
-- launch point, a 5000 m cylinder and a 400 m goal cylinder.
pDay5 :: [Zone Rational]
pDay5 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 15000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 5000) (toLL (negate 32.56608, 148.22657))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.0164, 149.43363))
    ]

-- | Expected distance of the whole of Task 5.
dDay5 :: TaskDistance Rational
dDay5 = fromKms [u| 221.477524 km |]

-- | Expected cumulative distances to each zone of Task 5. The 14.9 km entry
-- is presumably the 15 km cylinder less the 100 m cylinder — TODO confirm.
dsDay5 :: [TaskDistance Rational]
dsDay5 =
    fromKms . MkQuantity <$>
    [ 0
    , 14.9
    , 92.601904
    , 221.477524
    ]
-33.36137 , 147.93207 , -32.19498 , 147.76218 , -31.69323 , 148.29623
-33.36137 , 147.93207 , -32.19498 , 147.76218
-32.19498 , 147.76218 , -31.69323 , 148.29623
NOTE : Point to point distances using method .
= >
130.7
75.18
130.7 + 75.18
= > 205.88
205.88 - 15 - 0.4
= > 190.48
NOTE : Point to point distances using Vincenty method .
= >
130.3
75.13
130.3 + 75.13
= > 205.43
205.43 - 15 - 0.4
= > 190.03
- sphericalPointToPoint :
distance : 205.844959
legs :
- 130.665489
- 75.17947
legsSum :
- 130.665489
- 205.844959
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.19498
lng : 147.76218
- lat : -31.69322998
: 148.29623
-33.36137, 147.93207, -32.19498, 147.76218, -31.69323, 148.29623
-33.36137, 147.93207, -32.19498, 147.76218
-32.19498, 147.76218, -31.69323, 148.29623
NOTE: Point to point distances using Haversine method.
=>
130.7
75.18
130.7 + 75.18
=> 205.88
205.88 - 15 - 0.4
=> 190.48
NOTE: Point to point distances using Vincenty method.
=>
130.3
75.13
130.3 + 75.13
=> 205.43
205.43 - 15 - 0.4
=> 190.03
- sphericalPointToPoint:
distance: 205.844959
legs:
- 130.665489
- 75.17947
legsSum:
- 130.665489
- 205.844959
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.19498
lng: 147.76218
- lat: -31.69322998
lng: 148.29623
-}
-- | Zones of Task 6: a 100 m cylinder and a 15000 m cylinder at the same
-- launch point, a 5000 m cylinder and a 400 m goal cylinder.
pDay6 :: [Zone Rational]
pDay6 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 15000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 5000) (toLL (negate 32.19498, 147.76218))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 31.69323, 148.29623))
    ]

-- | Expected distance of the whole of Task 6.
dDay6 :: TaskDistance Rational
dDay6 = fromKms [u| 205.844959 km |]

-- | Expected cumulative distances to each zone of Task 6. The 14.9 km entry
-- is presumably the 15 km cylinder less the 100 m cylinder — TODO confirm.
dsDay6 :: [TaskDistance Rational]
dsDay6 =
    fromKms . MkQuantity <$>
    [ 0
    , 14.9
    , 130.665489
    , 205.844959
    ]
-33.36137 , 147.93207 , -32.9536 , 147.55457 , -32.76052 , 148.64958 , -32.93585 , 148.74947
-33.36137 , 147.93207 , -32.9536 , 147.55457
-32.9536 , 147.55457 , -32.76052 , 148.64958
-32.76052 , 148.64958 , -32.93585 , 148.74947
NOTE : Point to point distances using method .
= >
57.37
104.5
21.61
57.37 + 104.5 + 21.61
= > 183.48
183.48 - 10 - 0.4
= > 173.08
NOTE : Point to point distances using Vincenty method .
= >
57.32
104.7
21.58
57.32 + 104.7 + 21.58
= > 183.60
183.60 - 10 - 0.4
= > 173.2
- sphericalPointToPoint :
distance : 183.488931
legs :
- 57.365312
- 104.509732
- 21.613886
legsSum :
- 57.365312
- 161.875045
- 183.488931
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.9536
: 147.55457
- lat : -32.76051998
lng : 148.64957999
- lat : -32.93585
: 148.74947
-33.36137, 147.93207, -32.9536, 147.55457, -32.76052, 148.64958, -32.93585, 148.74947
-33.36137, 147.93207, -32.9536, 147.55457
-32.9536, 147.55457, -32.76052, 148.64958
-32.76052, 148.64958, -32.93585, 148.74947
NOTE: Point to point distances using Haversine method.
=>
57.37
104.5
21.61
57.37 + 104.5 + 21.61
=> 183.48
183.48 - 10 - 0.4
=> 173.08
NOTE: Point to point distances using Vincenty method.
=>
57.32
104.7
21.58
57.32 + 104.7 + 21.58
=> 183.60
183.60 - 10 - 0.4
=> 173.2
- sphericalPointToPoint:
distance: 183.488931
legs:
- 57.365312
- 104.509732
- 21.613886
legsSum:
- 57.365312
- 161.875045
- 183.488931
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.9536
lng: 147.55457
- lat: -32.76051998
lng: 148.64957999
- lat: -32.93585
lng: 148.74947
-}
-- | Zones of Task 7: a 100 m cylinder and a 10000 m cylinder at the same
-- launch point, a 5000 m cylinder and 400 m turnpoint cylinders.
pDay7 :: [Zone Rational]
pDay7 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 10000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 5000) (toLL (negate 32.9536, 147.55457))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.76052, 148.64958))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.93585, 148.74947))
    ]

-- | Expected distance of the whole of Task 7.
dDay7 :: TaskDistance Rational
dDay7 = fromKms [u| 183.488931 km |]

-- | Expected cumulative distances to each zone of Task 7. The 9.9 km entry
-- is presumably the 10 km cylinder less the 100 m cylinder — TODO confirm.
dsDay7 :: [TaskDistance Rational]
dsDay7 =
    fromKms . MkQuantity <$>
    [ 0
    , 9.9
    , 57.365312
    , 161.875045
    , 183.488931
    ]
-33.36137 , 147.93207 , -33.75343 , 147.52865 , -33.12908 , 147.57323 , -33.361 , 147.9315
-33.36137 , 147.93207 , -33.75343 , 147.52865
-33.75343 , 147.52865 , -33.12908 , 147.57323
-33.12908 , 147.57323 , -33.361 , 147.9315
NOTE : Point to point distances using method .
= >
57.43
69.55
42.13
57.43 + 69.55 + 42.13
= > 169.11
169.11 - 10 - 0.4
= > 158.71
NOTE : Point to point distances using Vincenty method .
= >
57.4
69.37
42.15
57.4 + 69.37 + 42.15
= > 168.92
169.92 - 10 - 0.4
= > 159.52
- sphericalPointToPoint :
distance : 169.10714
legs :
- 57.427511
- 69.547668
- 42.131961
legsSum :
- 57.427511
- 126.975179
- 169.10714
waypoints :
- lat : -33.36137
: 147.93207
- lat : -33.75343
: 147.52864998
- lat : -33.12908
: 147.57322998
- lat : -33.36099999
: 147.93149998
-33.36137, 147.93207, -33.75343, 147.52865, -33.12908, 147.57323, -33.361, 147.9315
-33.36137, 147.93207, -33.75343, 147.52865
-33.75343, 147.52865, -33.12908, 147.57323
-33.12908, 147.57323, -33.361, 147.9315
NOTE: Point to point distances using Haversine method.
=>
57.43
69.55
42.13
57.43 + 69.55 + 42.13
=> 169.11
169.11 - 10 - 0.4
=> 158.71
NOTE: Point to point distances using Vincenty method.
=>
57.4
69.37
42.15
57.4 + 69.37 + 42.15
=> 168.92
168.92 - 10 - 0.4
=> 158.52
- sphericalPointToPoint:
distance: 169.10714
legs:
- 57.427511
- 69.547668
- 42.131961
legsSum:
- 57.427511
- 126.975179
- 169.10714
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -33.75343
lng: 147.52864998
- lat: -33.12908
lng: 147.57322998
- lat: -33.36099999
lng: 147.93149998
-}
-- | Zones of Task 8: a 100 m cylinder and a 10000 m cylinder at the same
-- launch point, a 5000 m cylinder and 400 m turnpoint cylinders.
pDay8 :: [Zone Rational]
pDay8 =
    [ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 10000) (toLL (negate 33.36137, 147.93207))
    , Cylinder (Radius $ MkQuantity 5000) (toLL (negate 33.75343, 147.52865))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.12908, 147.57323))
    , Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.361, 147.9315))
    ]

-- | Expected distance of the whole of Task 8.
dDay8 :: TaskDistance Rational
dDay8 = fromKms [u| 169.10714 km |]

-- | Expected cumulative distances to each zone of Task 8. The 9.9 km entry
-- is presumably the 10 km cylinder less the 100 m cylinder — TODO confirm.
dsDay8 :: [TaskDistance Rational]
dsDay8 =
    fromKms . MkQuantity <$>
    [ 0
    , 9.9
    , 57.427511
    , 126.975179
    , 169.10714
    ]
-- | The edge-to-edge shortest path over the zones, seeded with the
-- point-to-point path and solved with circumference sampling to within
-- a 30 mm tolerance.
distanceEdgeToEdge' :: [Zone Rational] -> Zs (PathDistance Rational)
distanceEdgeToEdge' =
    FS.distanceEdgeToEdge span distancePointToPoint segCost cs cut mm30

-- | The cost of one segment of the path, measured with 'span'.
segCost :: CostSegment Rational
segCost = costSegment span

-- | The span of distance between two points, here the haversine distance.
span :: SpanLatLng Rational
span = Rat.distanceHaversine defEps

-- | Sampling of points around the circumference of a zone.
cs :: CircumSample Rational
cs = Rat.circumSample
-- | The initial angle cut: a sweep of pi radians, refined by 'nextCut'.
cut :: AngleCut Rational
cut =
    AngleCut
        { sweep =
            let (Epsilon e) = defEps
            in Bearing . MkQuantity $ F.pi e
        , nextSweep = nextCut
        }

-- | Halves the sweep angle of a cut, narrowing the search on each
-- refinement step.
nextCut :: AngleCut Rational -> AngleCut Rational
nextCut x@AngleCut{sweep} =
    let (Bearing b) = sweep in x{sweep = Bearing $ b /: 2}
| null | https://raw.githubusercontent.com/GlideAngle/flare-timing/27bd34c1943496987382091441a1c2516c169263/lang-haskell/task/test-suite-task/EdgeToEdge.hs | haskell | # OPTIONS_GHC -fplugin Data.UnitsOfMeasure.Plugin #
| The input pair is in degrees while the output is in radians.
| The input pair is in degrees while the output is in radians.
NOTE: Use p1' to avoid an hlint duplication warning. |
module EdgeToEdge (edgeToEdgeUnits, toLatLngDbl) where
import Prelude hiding (span)
import Data.Ratio((%))
import qualified Data.Number.FixedFunctions as F
import Data.List (inits)
import Test.Tasty (TestTree, TestName, testGroup)
import Test.Tasty.HUnit as HU ((@?=), (@?), testCase)
import Data.UnitsOfMeasure ((/:), (-:), u, convert)
import Data.UnitsOfMeasure.Internal (Quantity(..))
import Data.Bifunctor.Flip (Flip(..))
import Flight.Units ()
import Flight.LatLng (Lat(..), Lng(..), LatLng(..))
import Flight.LatLng.Rational (Epsilon(..), defEps)
import Flight.Distance (TaskDistance(..), PathDistance(..), SpanLatLng, fromKms)
import Flight.Zone (QBearing, Bearing(..), Radius(..), Zone(..))
import Flight.Zone.Cylinder
(Samples(..), SampleParams(..), Tolerance(..), CircumSample, ZonePoint(..))
import Flight.Zone.Path (costSegment, distancePointToPoint)
import qualified Flight.Earth.Sphere.PointToPoint.Rational as Rat (distanceHaversine)
import qualified Flight.Earth.Sphere.Cylinder.Rational as Rat (circumSample)
import qualified Flight.Task as FS (distanceEdgeToEdge)
import Flight.Task (Zs(..), AngleCut(..), CostSegment)
import Flight.Earth.Sphere.Separated (separatedZones)
import Data.Ratio.Rounding (dpRound)
(.>=.) :: (Show a, Show b) => a -> b -> String
(.>=.) x y = show x ++ " >= " ++ show y
(.<=.) :: (Show a, Show b) => a -> b -> String
(.<=.) x y = show x ++ " <= " ++ show y
(.~=.) :: (Show a, Show b) => a -> b -> String
(.~=.) x y = show x ++ " ~= " ++ show y
edgeToEdgeUnits :: TestTree
edgeToEdgeUnits = testGroup "Zone edge shortest path unit tests"
[ circumSampleUnits
, forbesUnits
]
m100 :: Tolerance Rational
m100 = Tolerance $ 100 % 1
mm100 :: Tolerance Rational
mm100 = Tolerance $ 100 % 1000
mm30 :: Tolerance Rational
mm30 = Tolerance $ 30 % 1000
mm10 :: Tolerance Rational
mm10 = Tolerance $ 10 % 1000
mm1 :: Tolerance Rational
mm1 = Tolerance $ 1 % 1000
sampleParams :: SampleParams Rational
sampleParams = SampleParams { spSamples = Samples 100
, spTolerance = mm30
}
ll :: LatLng Rational [u| rad |]
ll =
LatLng (lat, lng)
where
oneRadian = [u| 1 rad |]
lat = Lat oneRadian
lng = Lng oneRadian
br :: QBearing Rational [u| rad |]
br = let (Epsilon e) = defEps in (Bearing . MkQuantity $ F.pi e)
circumSampleUnits :: TestTree
circumSampleUnits = testGroup "Points just within the zone"
[ testGroup "Outside the zone."
[ HU.testCase
"No points > 0mm outside a 40m cylinder when searching within 1mm" $
zpFilter (>) ll [u| 40 m |]
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 40 m |]) ll))
@?= []
, HU.testCase
"No points > 0mm outside a 400m cylinder when searching within 1mm" $
zpFilter (>) ll [u| 400 m |]
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 400 m |]) ll))
@?= []
, HU.testCase
"No points > 0mm outside a 1km cylinder when searching within 10mm" $
zpFilter (>) ll (convert [u| 1 km |])
(fst $ cs (sampleParams { spTolerance = mm10 }) br Nothing (Cylinder (Radius $ convert [u| 1 km |]) ll))
@?= []
, HU.testCase
"No points > 0mm outside a 10km cylinder when searching within 100mm" $
zpFilter (>) ll (convert [u| 10 km |])
(fst $ cs (sampleParams { spTolerance = mm100 }) br Nothing (Cylinder (Radius $ convert [u| 10 km |]) ll))
@?= []
, HU.testCase
"No points > 0m outside a 100km cylinder when searching within 100m" $
zpFilter (>) ll (convert [u| 100 km |])
(fst $ cs (sampleParams { spTolerance = m100 }) br Nothing (Cylinder (Radius $ convert [u| 100 km |]) ll))
@?= []
]
, testGroup "Inside the zone."
[ HU.testCase
"No points > 1mm inside a 40m cylinder when searching within 1mm" $
zpFilter (<) ll ([u| 40 m |] -: convert [u| 1 mm |])
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 40 m |]) ll))
@?= []
, HU.testCase
"No points > 1mm inside a 400m cylinder when searching within 1mm" $
zpFilter (<) ll ([u| 400 m |] -: convert [u| 1 mm |])
(fst $ cs (sampleParams { spTolerance = mm1 }) br Nothing (Cylinder (Radius [u| 400 m |]) ll))
@?= []
, HU.testCase
"No points > 9mm inside a 1km cylinder when searching within 10mm" $
zpFilter (<) ll (convert [u| 1 km |] -: convert [u| 9 mm |])
(fst $ cs (sampleParams { spTolerance = mm10 }) br Nothing (Cylinder (Radius $ convert [u| 1 km |]) ll))
@?= []
, HU.testCase
"No points > 97mm inside a 10km cylinder when searching within 100mm" $
zpFilter (<) ll (convert [u| 10 km |] -: convert [u| 97 mm |])
(fst $ cs (sampleParams { spTolerance = mm100 }) br Nothing (Cylinder (Radius $ convert [u| 10 km |]) ll))
@?= []
, HU.testCase
"No points > 85m inside a 100km cylinder when searching within 100m" $
zpFilter (<) ll (convert [u| 100 km |] -: [u| 85 m |])
(fst $ cs (sampleParams { spTolerance = m100 }) br Nothing (Cylinder (Radius $ convert [u| 100 km |] ) ll))
@?= []
]
]
zpFilter
:: (Quantity Rational [u| m |] -> Quantity Rational [u| m |] -> Bool)
-> LatLng Rational [u| rad |]
-> Quantity Rational [u| m |]
-> [ZonePoint Rational]
-> [ZonePoint Rational]
zpFilter cmp origin d =
filter (\x -> zpDistance origin x `cmp` d)
zpDistance
:: LatLng Rational [u| rad |]
-> ZonePoint Rational
-> Quantity Rational [u| m |]
zpDistance origin ZonePoint{point} =
d
where
TaskDistance d =
edgesSum $ distancePointToPoint span [Point origin, Point point]
toLatLngDbl :: (Double, Double) -> LatLng Double [u| rad |]
toLatLngDbl (lat, lng) =
LatLng (Lat lat'', Lng lng'')
where
lat' = MkQuantity lat :: Quantity Double [u| deg |]
lng' = MkQuantity lng :: Quantity Double [u| deg |]
lat'' = convert lat' :: Quantity Double [u| rad |]
lng'' = convert lng' :: Quantity Double [u| rad |]
toLL :: (Double, Double) -> LatLng Rational [u| rad |]
toLL (lat, lng) =
LatLng (Lat lat'', Lng lng'')
where
lat' = (MkQuantity $ toRational lat) :: Quantity Rational [u| deg |]
lng' = (MkQuantity $ toRational lng) :: Quantity Rational [u| deg |]
lat'' = convert lat' :: Quantity Rational [u| rad |]
lng'' = convert lng' :: Quantity Rational [u| rad |]
forbesUnits :: TestTree
forbesUnits = testGroup "Forbes 2011/2012 distances"
[ day1PartUnits
, day1Units
, day2PartUnits
, day2Units
, day3PartUnits
, day3Units
, day4PartUnits
, day4Units
, day5PartUnits
, day5Units
, day6PartUnits
, day6Units
, day7PartUnits
, day7Units
, day8PartUnits
, day8Units
]
mkPartDayUnits :: TestName
-> [Zone Rational]
-> TaskDistance Rational
-> TestTree
mkPartDayUnits title zs (TaskDistance d) = testGroup title
[ HU.testCase
("point-to-point distance " ++ show td' ++ " ~= " ++ show tdR)
$ (tdR' == tdR) @? tdR' .~=. tdR
]
where
dKm = convert d :: Quantity Rational [u| km |]
Flip r = dpRound 3 <$> Flip dKm
tdR = TaskDistance (convert r :: Quantity Rational [u| m |])
td'@(TaskDistance d') = edgesSum $ distancePointToPoint span zs
dKm' = convert d' :: Quantity Rational [u| km |]
Flip r' = dpRound 3 <$> Flip dKm'
tdR' = TaskDistance (convert r' :: Quantity Rational [u| m |])
day1PartUnits :: TestTree
day1PartUnits = testGroup "Task 1 [...]"
[ mkPartDayUnits "Task 1 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 1 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 1 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 33.85373, 147.94195)
, (negate 33.4397, 148.34533)
, (negate 33.61965, 148.4099)
]
p1 = take 2 xs
d1 = fromKms [u| 54.755578 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 59.276627 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 20.88547 km |]
day2PartUnits :: TestTree
day2PartUnits = testGroup "Task 2 [...]"
[ mkPartDayUnits "Task 2 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 2 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 2 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.90223, 147.98492)
, (negate 32.9536, 147.55457)
, (negate 33.12592, 147.91043)
]
p1 = take 2 xs
d1 = fromKms [u| 51.290669 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 40.569544 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 38.30752 km |]
day3PartUnits :: TestTree
day3PartUnits = testGroup "Task 3 [...]"
[ mkPartDayUnits "Task 3 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 3 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 3 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 34.02107, 148.2233)
, (negate 34.11795, 148.5013)
, (negate 34.82197, 148.66543)
]
p1 = take 2 xs
d1 = fromKms [u| 78.147093 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 27.780099 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 79.716223 km |]
day4PartUnits :: TestTree
day4PartUnits = testGroup "Task 4 [...]"
[ mkPartDayUnits "Task 4 [x, x, _]" p1' d1
, mkPartDayUnits "Task 4 [_, x, x]" p2 d2
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.90223, 147.98492)
, (negate 32.46363, 148.989)
]
p1' = take 2 xs
d1 = fromKms [u| 51.290669 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 105.87255 km |]
day5PartUnits :: TestTree
day5PartUnits = testGroup "Task 5 [...]"
[ mkPartDayUnits "Task 5 [x, x, _]" p1 d1
, mkPartDayUnits "Task 5 [_, x, x]" p2 d2
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.56608, 148.22657)
, (negate 32.0164, 149.43363)
]
p1 = take 2 xs
d1 = fromKms [u| 92.601904 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 128.87562 km |]
day6PartUnits :: TestTree
day6PartUnits = testGroup "Task 6 [...]"
[ mkPartDayUnits "Task 6 [x, x, _]" p1 d1
, mkPartDayUnits "Task 6 [_, x, x]" p2 d2
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.19498, 147.76218)
, (negate 31.69323, 148.29623)
]
p1 = take 2 xs
d1 = fromKms [u| 130.665489 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 75.17947 km |]
day7PartUnits :: TestTree
day7PartUnits = testGroup "Task 7 [...]"
[ mkPartDayUnits "Task 7 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 7 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 7 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 32.9536, 147.55457)
, (negate 32.76052, 148.64958)
, (negate 32.93585, 148.74947)
]
p1 = take 2 xs
d1 = fromKms [u| 57.365312 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 104.509732 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 21.613886 km |]
day8PartUnits :: TestTree
day8PartUnits = testGroup "Task 8 [...]"
[ mkPartDayUnits "Task 8 [x, x, _, _]" p1 d1
, mkPartDayUnits "Task 8 [_, x, x, _]" p2 d2
, mkPartDayUnits "Task 8 [_, _, x, x]" p3 d3
]
where
xs =
Point . toLL <$>
[ (negate 33.36137, 147.93207)
, (negate 33.75343, 147.52865)
, (negate 33.12908, 147.57323)
, (negate 33.361, 147.9315)
]
p1 = take 2 xs
d1 = fromKms [u| 57.427511 km |]
p2 = take 2 $ drop 1 xs
d2 = fromKms [u| 69.547668 km |]
p3 = take 2 $ drop 2 xs
d3 = fromKms [u| 42.131961 km |]
mkDayUnits :: TestName
-> [Zone Rational]
-> TaskDistance Rational
-> [TaskDistance Rational]
-> TestTree
mkDayUnits title pDay dDay' dsDay' = testGroup title
[ HU.testCase "zones are separated" $ separatedZones span pDay @?= True
, HU.testCase
("point-to-point distance >= " ++ show dDay)
$ (ppDay >= dDay) @? ppDay .>=. dDay
, HU.testCase
("edge-to-edge distance <= " ++ show dDay)
$ (eeDay <= dDay) @? eeDay .<=. dDay
, HU.testCase
("point-to-point distances "
++ show ppDayInits
++ " >= "
++ show dsDay
) $
(ppDayInits >= dsDay) @? ppDayInits .>=. dsDay
, HU.testCase
("edge-to-edge distances "
++ show eeDayInits
++ " <= "
++ show dsDay
) $
distLess eeDayInits dsDay @? eeDayInits .<=. dsDay
]
where
dDay = tdRound dDay'
dsDay = tdRound <$> dsDay'
pp :: [Zone Rational] -> PathDistance Rational
pp = distancePointToPoint span
ee :: [Zone Rational] -> PathDistance Rational
ee xs =
case distanceEdgeToEdge' xs of
Zs x -> x
_ -> PathDistance (TaskDistance [u| 0 m |]) []
ppDay :: TaskDistance Rational
ppDay = tdRound . edgesSum $ pp pDay
eeDay :: TaskDistance Rational
eeDay = tdRound . edgesSum $ ee pDay
pDayInits :: [[Zone Rational]]
pDayInits = drop 1 $ inits pDay
ppDayInits :: [TaskDistance Rational]
ppDayInits = tdRound . edgesSum . pp <$> pDayInits
eeDayInits :: [TaskDistance Rational]
eeDayInits = tdRound . edgesSum . ee <$> pDayInits
distLess :: Ord a => [a] -> [a] -> Bool
distLess xs ys = take 1 (reverse xs) <= take 1 (reverse ys)
tdRound :: TaskDistance Rational -> TaskDistance Rational
tdRound (TaskDistance (MkQuantity d)) =
TaskDistance . MkQuantity . dpRound 2 $ d
day1Units :: TestTree
day1Units = mkDayUnits "Task 1" pDay1 dDay1 dsDay1
day2Units :: TestTree
day2Units = mkDayUnits "Task 2" pDay2 dDay2 dsDay2
day3Units :: TestTree
day3Units = mkDayUnits "Task 3" pDay3 dDay3 dsDay3
day4Units :: TestTree
day4Units = mkDayUnits "Task 4" pDay4 dDay4 dsDay4
day5Units :: TestTree
day5Units = mkDayUnits "Task 5" pDay5 dDay5 dsDay5
day6Units :: TestTree
day6Units = mkDayUnits "Task 6" pDay6 dDay6 dsDay6
day7Units :: TestTree
day7Units = mkDayUnits "Task 7" pDay7 dDay7 dsDay7
day8Units :: TestTree
day8Units = mkDayUnits "Task 8" pDay8 dDay8 dsDay8
NOTE : The task distances show below are taken from the competition * .fsdb file
at the path /Fs / FsCompetition / FsTasks / FsTask / FsTaskScoreParams / FsTaskDistToTp .
The first distance is not 9.9 kms , 10 kms - 100 m.
Some flight instruments use WGS84 and others use the FAI spheriod . To
accomodate this , there is a tolerance of either 0.01 % or 0.5 % used , depending
on the competition . For category 1 events since 2015 - 01 - 01 it is 0.01 % .
Category 2 events can elect to use the wider margin . This tolerance is used for
working out if tracks reach control zones .
The optimised route is worked out by FS in 2D space from a UTM projection . This
accounts for the discrepency with errors coming from choosing wrong waypoints
for the optimal route and from the conversion of these points back to the FAI
sphere .
TODO : Find out why the first distance is 9.882 and not 9.9 km .
< FsTaskDistToTp tp_no="1 " distance="0 " / >
< FsTaskDistToTp tp_no="2 " distance="9.882 " / >
< FsTaskDistToTp tp_no="3 " distance="54.254 " / >
< FsTaskDistToTp tp_no="4 " distance="112.779 " / >
< FsTaskDistToTp tp_no="5 " distance="133.357 " / >
The unit tests here are not comparing to distances of FS but to point to point
distances worked out on the FAI sphere .
SEE :
SEE : -type.co.uk/scripts/latlong-vincenty.html
SEE :
SEE : /
-33.36137 , 147.93207 , -33.85373 , 147.94195 , -33.4397 , 148.34533 , -33.61965 , 148.4099
-33.36137 , 147.93207 , -33.85373 , 147.94195
-33.85373 , 147.94195 , -33.4397 , 148.34533
-33.4397 , 148.34533 , -33.61965 , 148.4099
NOTE : Point to point distances using method .
= >
54.76
59.28
20.89
54.76 + 59.28 + 20.89
= > 134.93
134.93 - 10 - 0.4
= > 124.53
NOTE : Point to point distances using Vincenty method .
= >
54.62
59.24
20.84
54.62 + 59.24 + 20.84
= > 134.7
134.7 - 10 - 0.4
= > 124.30
- sphericalPointToPoint :
distance : 134.917675
legs :
- 54.755578
- 59.276627
- 20.88547
legsSum :
- 54.755578
- 114.032205
- 134.917675
waypoints :
- lat : -33.36137
: 147.93207
- lat : -33.85372998
lng : 147.94194999
- lat : -33.4397
lng : 148.34532999
- lat : -33.61965
: 148.40989999
NOTE: The task distances show below are taken from the competition *.fsdb file
at the path /Fs/FsCompetition/FsTasks/FsTask/FsTaskScoreParams/FsTaskDistToTp.
The first distance is not 9.9 kms, 10 kms - 100m.
Some flight instruments use WGS84 and others use the FAI spheriod. To
accomodate this, there is a tolerance of either 0.01% or 0.5% used, depending
on the competition. For category 1 events since 2015-01-01 it is 0.01%.
Category 2 events can elect to use the wider margin. This tolerance is used for
working out if tracks reach control zones.
The optimised route is worked out by FS in 2D space from a UTM projection. This
accounts for the discrepency with errors coming from choosing wrong waypoints
for the optimal route and from the conversion of these points back to the FAI
sphere.
TODO: Find out why the first distance is 9.882 and not 9.9 km.
<FsTaskDistToTp tp_no="1" distance="0" />
<FsTaskDistToTp tp_no="2" distance="9.882" />
<FsTaskDistToTp tp_no="3" distance="54.254" />
<FsTaskDistToTp tp_no="4" distance="112.779" />
<FsTaskDistToTp tp_no="5" distance="133.357" />
The unit tests here are not comparing to distances of FS but to point to point
distances worked out on the FAI sphere.
SEE:
SEE: -type.co.uk/scripts/latlong-vincenty.html
SEE:
SEE: /
-33.36137, 147.93207, -33.85373, 147.94195, -33.4397, 148.34533, -33.61965, 148.4099
-33.36137, 147.93207, -33.85373, 147.94195
-33.85373, 147.94195, -33.4397, 148.34533
-33.4397, 148.34533, -33.61965, 148.4099
NOTE: Point to point distances using Haversine method.
=>
54.76
59.28
20.89
54.76 + 59.28 + 20.89
=> 134.93
134.93 - 10 - 0.4
=> 124.53
NOTE: Point to point distances using Vincenty method.
=>
54.62
59.24
20.84
54.62 + 59.24 + 20.84
=> 134.7
134.7 - 10 - 0.4
=> 124.30
- sphericalPointToPoint:
distance: 134.917675
legs:
- 54.755578
- 59.276627
- 20.88547
legsSum:
- 54.755578
- 114.032205
- 134.917675
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -33.85372998
lng: 147.94194999
- lat: -33.4397
lng: 148.34532999
- lat: -33.61965
lng: 148.40989999
-}
pDay1 :: [Zone Rational]
pDay1 =
[ Cylinder (Radius $ MkQuantity 100) $ toLL (negate 33.36137, 147.93207)
, Cylinder (Radius $ MkQuantity 10000) $ toLL (negate 33.36137, 147.93207)
, Cylinder (Radius $ MkQuantity 400) $ toLL (negate 33.85373, 147.94195)
, Cylinder (Radius $ MkQuantity 400) $ toLL (negate 33.4397, 148.34533)
, Cylinder (Radius $ MkQuantity 400) $ toLL (negate 33.61965, 148.4099)
]
dDay1 :: TaskDistance Rational
dDay1 = fromKms [u| 134.917675 km |]
dsDay1 :: [TaskDistance Rational]
dsDay1 =
fromKms . MkQuantity <$>
[ 0
, 9.9
, 54.755578
, 114.032205
, 134.917675
]
-33.36137 , 147.93207 , -32.90223 , 147.98492 , -32.9536 , 147.55457 , -33.12592 , 147.91043
-33.36137 , 147.93207 , -32.90223 , 147.98492
-32.90223 , 147.98492 , -32.9536 , 147.55457
-32.9536 , 147.55457 , -33.12592 , 147.91043
NOTE : Point to point distances using method .
= >
51.29
40.57
38.31
51.29 + 40.57 + 38.31
= > 130.17
130.17 - 5 - 0.4
= > 124.77
NOTE : Point to point distances using Vincenty method .
= >
51.16
40.65
38.34
51.16 + 40.65 + 38.34
= > 130.15
130.15 - 5 - 0.4
= > 124.75
- sphericalPointToPoint :
distance : 130.167733
legs :
- 51.290669
- 40.569544
- 38.30752
legsSum :
- 51.290669
- 91.860213
- 130.167733
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.90223
: 147.98491999
- lat : -32.9536
: 147.55457
- lat : -33.12592
lng : 147.91042999
-33.36137, 147.93207, -32.90223, 147.98492, -32.9536, 147.55457, -33.12592, 147.91043
-33.36137, 147.93207, -32.90223, 147.98492
-32.90223, 147.98492, -32.9536, 147.55457
-32.9536, 147.55457, -33.12592, 147.91043
NOTE: Point to point distances using Haversine method.
=>
51.29
40.57
38.31
51.29 + 40.57 + 38.31
=> 130.17
130.17 - 5 - 0.4
=> 124.77
NOTE: Point to point distances using Vincenty method.
=>
51.16
40.65
38.34
51.16 + 40.65 + 38.34
=> 130.15
130.15 - 5 - 0.4
=> 124.75
- sphericalPointToPoint:
distance: 130.167733
legs:
- 51.290669
- 40.569544
- 38.30752
legsSum:
- 51.290669
- 91.860213
- 130.167733
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.90223
lng: 147.98491999
- lat: -32.9536
lng: 147.55457
- lat: -33.12592
lng: 147.91042999
-}
pDay2 :: [Zone Rational]
pDay2 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 5000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.90223, 147.98492))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.9536, 147.55457))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.12592, 147.91043))
]
dDay2 :: TaskDistance Rational
dDay2 = fromKms [u| 130.167733 km |]
dsDay2 :: [TaskDistance Rational]
dsDay2 =
fromKms . MkQuantity <$>
[ 0
, 4.9
, 51.290669
, 91.860213
, 130.167733
]
-33.36137 , 147.93207 , -34.02107 , 148.2233 , -34.11795 , 148.5013 , -34.82197 , 148.66543
-33.36137 , 147.93207 , -34.02107 , 148.2233
-34.02107 , 148.2233 , -34.11795 , 148.5013
-34.11795 , 148.5013 , -34.82197 , 148.66543
NOTE : Point to point distances using method .
= >
78.15
27.78
79.72
78.15 + 27.78 + 79.72
= > 185.65
185.65 - 25 - 0.4
= > 160.25
NOTE : Point to point distances using Vincenty method .
= >
77.99
27.82
79.54
77.99 + 27.82 + 79.54
= > 185.35
185.35 - 25 - 0.4
= > 159.95
- sphericalPointToPoint :
distance : 185.643415
legs :
- 78.147093
- 27.780099
- 79.716223
legsSum :
- 78.147093
- 105.927192
- 185.643415
waypoints :
- lat : -33.36137
: 147.93207
- lat : -34.02107
lng : 148.22329998
- lat : -34.11795
: 148.50129999
- lat : -34.82197
lng : 148.66542999
-33.36137, 147.93207, -34.02107, 148.2233, -34.11795, 148.5013, -34.82197, 148.66543
-33.36137, 147.93207, -34.02107, 148.2233
-34.02107, 148.2233, -34.11795, 148.5013
-34.11795, 148.5013, -34.82197, 148.66543
NOTE: Point to point distances using Haversine method.
=>
78.15
27.78
79.72
78.15 + 27.78 + 79.72
=> 185.65
185.65 - 25 - 0.4
=> 160.25
NOTE: Point to point distances using Vincenty method.
=>
77.99
27.82
79.54
77.99 + 27.82 + 79.54
=> 185.35
185.35 - 25 - 0.4
=> 159.95
- sphericalPointToPoint:
distance: 185.643415
legs:
- 78.147093
- 27.780099
- 79.716223
legsSum:
- 78.147093
- 105.927192
- 185.643415
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -34.02107
lng: 148.22329998
- lat: -34.11795
lng: 148.50129999
- lat: -34.82197
lng: 148.66542999
-}
pDay3 :: [Zone Rational]
pDay3 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 25000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 34.02107, 148.2233))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 34.11795, 148.5013))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 34.82197, 148.66543))
]
dDay3 :: TaskDistance Rational
dDay3 = fromKms [u| 185.643415 km |]
dsDay3 :: [TaskDistance Rational]
dsDay3 =
fromKms . MkQuantity <$>
[ 0
, 24.9
, 78.147093
, 105.927192
, 185.643415
]
-33.36137 , 147.93207 , -32.90223 , 147.98492 , -32.46363 , 148.989
-33.36137 , 147.93207 , -32.90223 , 147.98492
-32.90223 , 147.98492 , -32.46363 , 148.989
NOTE : Point to point distances using method .
= >
51.29
105.9
51.29 + 105.9
= > 157.19
157.19 - 15 - 0.4
= > 141.79
NOTE : Point to point distances using Vincenty method .
= >
51.16
106
51.16 + 106
= > 157.16
157.16 - 15 - 0.4
= > 141.76
- sphericalPointToPoint :
distance : 157.16322
legs :
- 51.290669
- 105.87255
legsSum :
- 51.290669
- 157.16322
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.90223
: 147.98491999
- lat : -32.46363
: 148.989
-33.36137, 147.93207, -32.90223, 147.98492, -32.46363, 148.989
-33.36137, 147.93207, -32.90223, 147.98492
-32.90223, 147.98492, -32.46363, 148.989
NOTE: Point to point distances using Haversine method.
=>
51.29
105.9
51.29 + 105.9
=> 157.19
157.19 - 15 - 0.4
=> 141.79
NOTE: Point to point distances using Vincenty method.
=>
51.16
106
51.16 + 106
=> 157.16
157.16 - 15 - 0.4
=> 141.76
- sphericalPointToPoint:
distance: 157.16322
legs:
- 51.290669
- 105.87255
legsSum:
- 51.290669
- 157.16322
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.90223
lng: 147.98491999
- lat: -32.46363
lng: 148.989
-}
pDay4 :: [Zone Rational]
pDay4 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 15000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 25000) (toLL (negate 32.90223, 147.98492))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.46363, 148.989))
]
dDay4 :: TaskDistance Rational
dDay4 = fromKms [u| 157.16322 km |]
dsDay4 :: [TaskDistance Rational]
dsDay4 =
fromKms . MkQuantity <$>
[ 0
, 14.9
, 51.290669
, 157.16322
]
-33.36137 , 147.93207 , -32.56608 , 148.22657 , -32.0164 , 149.43363
-33.36137 , 147.93207 , -32.56608 , 148.22657
-32.56608 , 148.22657 , -32.0164 , 149.43363
NOTE : Point to point distances using method .
= >
92.6
128.9
92.6 + 128.9
= > 221.5
221.5 - 15 - 0.4
= > 206.1
NOTE : Point to point distances using Vincenty method .
= >
92.4
129
92.4 + 129
= > 221.4
221.4 - 15 - 0.4
= > 206.0
- sphericalPointToPoint :
distance : 221.477524
legs :
- 92.601904
- 128.87562
legsSum :
- 92.601904
- 221.477524
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.56607998
lng : 148.22657
- lat : -32.01639998
lng : 149.43362998
-33.36137, 147.93207, -32.56608, 148.22657, -32.0164, 149.43363
-33.36137, 147.93207, -32.56608, 148.22657
-32.56608, 148.22657, -32.0164, 149.43363
NOTE: Point to point distances using Haversine method.
=>
92.6
128.9
92.6 + 128.9
=> 221.5
221.5 - 15 - 0.4
=> 206.1
NOTE: Point to point distances using Vincenty method.
=>
92.4
129
92.4 + 129
=> 221.4
221.4 - 15 - 0.4
=> 206.0
- sphericalPointToPoint:
distance: 221.477524
legs:
- 92.601904
- 128.87562
legsSum:
- 92.601904
- 221.477524
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.56607998
lng: 148.22657
- lat: -32.01639998
lng: 149.43362998
-}
pDay5 :: [Zone Rational]
pDay5 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 15000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 5000) (toLL (negate 32.56608, 148.22657))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.0164, 149.43363))
]
dDay5 :: TaskDistance Rational
dDay5 = fromKms [u| 221.477524 km |]
dsDay5 :: [TaskDistance Rational]
dsDay5 =
fromKms . MkQuantity <$>
[ 0
, 14.9
, 92.601904
, 221.477524
]
-33.36137 , 147.93207 , -32.19498 , 147.76218 , -31.69323 , 148.29623
-33.36137 , 147.93207 , -32.19498 , 147.76218
-32.19498 , 147.76218 , -31.69323 , 148.29623
NOTE : Point to point distances using method .
= >
130.7
75.18
130.7 + 75.18
= > 205.88
205.88 - 15 - 0.4
= > 190.48
NOTE : Point to point distances using Vincenty method .
= >
130.3
75.13
130.3 + 75.13
= > 205.43
205.43 - 15 - 0.4
= > 190.03
- sphericalPointToPoint :
distance : 205.844959
legs :
- 130.665489
- 75.17947
legsSum :
- 130.665489
- 205.844959
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.19498
lng : 147.76218
- lat : -31.69322998
: 148.29623
-33.36137, 147.93207, -32.19498, 147.76218, -31.69323, 148.29623
-33.36137, 147.93207, -32.19498, 147.76218
-32.19498, 147.76218, -31.69323, 148.29623
NOTE: Point to point distances using Haversine method.
=>
130.7
75.18
130.7 + 75.18
=> 205.88
205.88 - 15 - 0.4
=> 190.48
NOTE: Point to point distances using Vincenty method.
=>
130.3
75.13
130.3 + 75.13
=> 205.43
205.43 - 15 - 0.4
=> 190.03
- sphericalPointToPoint:
distance: 205.844959
legs:
- 130.665489
- 75.17947
legsSum:
- 130.665489
- 205.844959
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.19498
lng: 147.76218
- lat: -31.69322998
lng: 148.29623
-}
pDay6 :: [Zone Rational]
pDay6 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 15000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 5000) (toLL (negate 32.19498, 147.76218))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 31.69323, 148.29623))
]
dDay6 :: TaskDistance Rational
dDay6 = fromKms [u| 205.844959 km |]
dsDay6 :: [TaskDistance Rational]
dsDay6 =
fromKms . MkQuantity <$>
[ 0
, 14.9
, 130.665489
, 205.844959
]
-33.36137 , 147.93207 , -32.9536 , 147.55457 , -32.76052 , 148.64958 , -32.93585 , 148.74947
-33.36137 , 147.93207 , -32.9536 , 147.55457
-32.9536 , 147.55457 , -32.76052 , 148.64958
-32.76052 , 148.64958 , -32.93585 , 148.74947
NOTE : Point to point distances using method .
= >
57.37
104.5
21.61
57.37 + 104.5 + 21.61
= > 183.48
183.48 - 10 - 0.4
= > 173.08
NOTE : Point to point distances using Vincenty method .
= >
57.32
104.7
21.58
57.32 + 104.7 + 21.58
= > 183.60
183.60 - 10 - 0.4
= > 173.2
- sphericalPointToPoint :
distance : 183.488931
legs :
- 57.365312
- 104.509732
- 21.613886
legsSum :
- 57.365312
- 161.875045
- 183.488931
waypoints :
- lat : -33.36137
: 147.93207
- lat : -32.9536
: 147.55457
- lat : -32.76051998
lng : 148.64957999
- lat : -32.93585
: 148.74947
-33.36137, 147.93207, -32.9536, 147.55457, -32.76052, 148.64958, -32.93585, 148.74947
-33.36137, 147.93207, -32.9536, 147.55457
-32.9536, 147.55457, -32.76052, 148.64958
-32.76052, 148.64958, -32.93585, 148.74947
NOTE: Point to point distances using Haversine method.
=>
57.37
104.5
21.61
57.37 + 104.5 + 21.61
=> 183.48
183.48 - 10 - 0.4
=> 173.08
NOTE: Point to point distances using Vincenty method.
=>
57.32
104.7
21.58
57.32 + 104.7 + 21.58
=> 183.60
183.60 - 10 - 0.4
=> 173.2
- sphericalPointToPoint:
distance: 183.488931
legs:
- 57.365312
- 104.509732
- 21.613886
legsSum:
- 57.365312
- 161.875045
- 183.488931
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -32.9536
lng: 147.55457
- lat: -32.76051998
lng: 148.64957999
- lat: -32.93585
lng: 148.74947
-}
pDay7 :: [Zone Rational]
pDay7 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 10000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 5000) (toLL (negate 32.9536, 147.55457))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.76052, 148.64958))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 32.93585, 148.74947))
]
dDay7 :: TaskDistance Rational
dDay7 = fromKms [u| 183.488931 km |]
dsDay7 :: [TaskDistance Rational]
dsDay7 =
fromKms . MkQuantity <$>
[ 0
, 9.9
, 57.365312
, 161.875045
, 183.488931
]
-33.36137 , 147.93207 , -33.75343 , 147.52865 , -33.12908 , 147.57323 , -33.361 , 147.9315
-33.36137 , 147.93207 , -33.75343 , 147.52865
-33.75343 , 147.52865 , -33.12908 , 147.57323
-33.12908 , 147.57323 , -33.361 , 147.9315
NOTE : Point to point distances using method .
= >
57.43
69.55
42.13
57.43 + 69.55 + 42.13
= > 169.11
169.11 - 10 - 0.4
= > 158.71
NOTE : Point to point distances using Vincenty method .
= >
57.4
69.37
42.15
57.4 + 69.37 + 42.15
= > 168.92
169.92 - 10 - 0.4
= > 159.52
- sphericalPointToPoint :
distance : 169.10714
legs :
- 57.427511
- 69.547668
- 42.131961
legsSum :
- 57.427511
- 126.975179
- 169.10714
waypoints :
- lat : -33.36137
: 147.93207
- lat : -33.75343
: 147.52864998
- lat : -33.12908
: 147.57322998
- lat : -33.36099999
: 147.93149998
-33.36137, 147.93207, -33.75343, 147.52865, -33.12908, 147.57323, -33.361, 147.9315
-33.36137, 147.93207, -33.75343, 147.52865
-33.75343, 147.52865, -33.12908, 147.57323
-33.12908, 147.57323, -33.361, 147.9315
NOTE: Point to point distances using Haversine method.
=>
57.43
69.55
42.13
57.43 + 69.55 + 42.13
=> 169.11
169.11 - 10 - 0.4
=> 158.71
NOTE: Point to point distances using Vincenty method.
=>
57.4
69.37
42.15
57.4 + 69.37 + 42.15
=> 168.92
169.92 - 10 - 0.4
=> 159.52
- sphericalPointToPoint:
distance: 169.10714
legs:
- 57.427511
- 69.547668
- 42.131961
legsSum:
- 57.427511
- 126.975179
- 169.10714
waypoints:
- lat: -33.36137
lng: 147.93207
- lat: -33.75343
lng: 147.52864998
- lat: -33.12908
lng: 147.57322998
- lat: -33.36099999
lng: 147.93149998
-}
pDay8 :: [Zone Rational]
pDay8 =
[ Cylinder (Radius $ MkQuantity 100) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 10000) (toLL (negate 33.36137, 147.93207))
, Cylinder (Radius $ MkQuantity 5000) (toLL (negate 33.75343, 147.52865))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.12908, 147.57323))
, Cylinder (Radius $ MkQuantity 400) (toLL (negate 33.361, 147.9315))
]
dDay8 :: TaskDistance Rational
dDay8 = fromKms [u| 169.10714 km |]
dsDay8 :: [TaskDistance Rational]
dsDay8 =
fromKms . MkQuantity <$>
[ 0
, 9.9
, 57.427511
, 126.975179
, 169.10714
]
distanceEdgeToEdge' :: [Zone Rational] -> Zs (PathDistance Rational)
distanceEdgeToEdge' =
FS.distanceEdgeToEdge span distancePointToPoint segCost cs cut mm30
segCost :: CostSegment Rational
segCost = costSegment span
span :: SpanLatLng Rational
span = Rat.distanceHaversine defEps
cs :: CircumSample Rational
cs = Rat.circumSample
cut :: AngleCut Rational
cut =
AngleCut
{ sweep =
let (Epsilon e) = defEps
in Bearing . MkQuantity $ F.pi e
, nextSweep = nextCut
}
nextCut :: AngleCut Rational -> AngleCut Rational
nextCut x@AngleCut{sweep} =
let (Bearing b) = sweep in x{sweep = Bearing $ b /: 2}
|
ce3c6cdf8fcb9f6292c7320bad7ffbbcab6dfbd9e94becf326371ca8b0af8b67 | rfkm/zou | rule_test.clj | (ns zou.finder.rule-test
(:require [clojure.test :as t]
[midje.sweet :refer :all]
[zou.component :as c]
[zou.finder.proto :as proto]
[zou.finder.rule :as sut]))
(def test-var)
(t/deftest helper-test
(fact "expand-kw"
(let [conf {nil :hoge.handler
:acme :acme.handler
:acme.fuga :acme-fuga.handler}]
(sut/expand-kw :foo/bar conf) => :hoge.handler.foo/bar
(sut/expand-kw :acme.foo/bar conf) => :acme.handler.foo/bar
;; longer match wins
(sut/expand-kw :acme.fuga/bar conf) => :acme-fuga.handler/bar
(sut/expand-kw :bar {nil :hoge.handler
:bar :baz}) => :hoge.handler/bar))
(fact "kw->var"
(sut/kw->var ::test-var {}) => #'test-var
(sut/kw->var :foo/test-var {:foo :zou.finder.rule-test}) => #'test-var))
(t/deftest component-test
(fact
(c/with-component [c (sut/->RuleBasedFinder {:foo :zou.finder.rule-test})]
(proto/find c :foo/test-var) => #'test-var)))
| null | https://raw.githubusercontent.com/rfkm/zou/228feefae3e008f56806589cb8019511981f7b01/lib/test/zou/finder/rule_test.clj | clojure | longer match wins | (ns zou.finder.rule-test
(:require [clojure.test :as t]
[midje.sweet :refer :all]
[zou.component :as c]
[zou.finder.proto :as proto]
[zou.finder.rule :as sut]))
(def test-var)
(t/deftest helper-test
(fact "expand-kw"
(let [conf {nil :hoge.handler
:acme :acme.handler
:acme.fuga :acme-fuga.handler}]
(sut/expand-kw :foo/bar conf) => :hoge.handler.foo/bar
(sut/expand-kw :acme.foo/bar conf) => :acme.handler.foo/bar
(sut/expand-kw :acme.fuga/bar conf) => :acme-fuga.handler/bar
(sut/expand-kw :bar {nil :hoge.handler
:bar :baz}) => :hoge.handler/bar))
(fact "kw->var"
(sut/kw->var ::test-var {}) => #'test-var
(sut/kw->var :foo/test-var {:foo :zou.finder.rule-test}) => #'test-var))
(t/deftest component-test
(fact
(c/with-component [c (sut/->RuleBasedFinder {:foo :zou.finder.rule-test})]
(proto/find c :foo/test-var) => #'test-var)))
|
3e822c364cc0e42f66379e716a6bb1cf5ca59545fdb2217f9886f66a79c1f441 | yomimono/stitchcraft | controls.ml | open Stitchy.Types
type view = {
x_off : int;
y_off : int;
block_display : [ `Symbol | `Solid ];
zoom : int;
}
type pane = {
width : int;
height : int;
}
(* dimensions of the x-axis labels and y-axis labels
(the upper-right and lower-left rectangles respectively)
are implicitly defined by the empty corner and stitch grid
(the upper-left and lower-right rectangles) *)
type left_pane = {
empty_corner : pane;
stitch_grid : pane;
}
let switch_view v =
match v.block_display with
| `Symbol -> {v with block_display = `Solid }
| `Solid -> {v with block_display = `Symbol }
let limit_rd_scroll ~last_page proposed =
max 0 @@ min proposed last_page
let limit_lu_scroll proposed =
max proposed 0
let page_right substrate view left_pane =
let next_page = view.x_off + left_pane.stitch_grid.width
and last_page = (substrate.max_x - left_pane.stitch_grid.width)
in
let best_offset = limit_rd_scroll ~last_page next_page in
{ view with x_off = best_offset }
let page_left view left_pane =
let prev_page = view.x_off - left_pane.stitch_grid.width in
let best_offset = limit_lu_scroll prev_page in
{ view with x_off = best_offset }
let page_down substrate view left_pane =
let next_page = view.y_off + left_pane.stitch_grid.height
and last_page = (substrate.max_y - left_pane.stitch_grid.height)
in
let best_offset = limit_rd_scroll ~last_page next_page in
{ view with y_off = best_offset }
let page_up view left_pane =
let prev_page = view.y_off - left_pane.stitch_grid.height in
let best_offset = limit_lu_scroll prev_page in
{ view with y_off = best_offset }
let scroll substrate view = function
| `Right -> { view with x_off = limit_rd_scroll ~last_page:(substrate.max_x - 1) (view.x_off + 1) }
| `Left -> { view with x_off = limit_lu_scroll (view.x_off - 1) }
| `Up -> { view with y_off = limit_lu_scroll (view.y_off - 1) }
| `Down -> { view with y_off = limit_rd_scroll ~last_page:(substrate.max_y - 1) (view.y_off + 1) }
let page substrate view left_pane = function
| `Right -> page_right substrate view left_pane
| `Left -> page_left view left_pane
| `Up -> page_up view left_pane
| `Down -> page_down substrate view left_pane
| null | https://raw.githubusercontent.com/yomimono/stitchcraft/329e528544c22971182c310d1240050ef8273960/notty_canvas/lib/controls.ml | ocaml | dimensions of the x-axis labels and y-axis labels
(the upper-right and lower-left rectangles respectively)
are implicitly defined by the empty corner and stitch grid
(the upper-left and lower-right rectangles) | open Stitchy.Types
type view = {
x_off : int;
y_off : int;
block_display : [ `Symbol | `Solid ];
zoom : int;
}
type pane = {
width : int;
height : int;
}
type left_pane = {
empty_corner : pane;
stitch_grid : pane;
}
let switch_view v =
match v.block_display with
| `Symbol -> {v with block_display = `Solid }
| `Solid -> {v with block_display = `Symbol }
let limit_rd_scroll ~last_page proposed =
max 0 @@ min proposed last_page
let limit_lu_scroll proposed =
max proposed 0
let page_right substrate view left_pane =
let next_page = view.x_off + left_pane.stitch_grid.width
and last_page = (substrate.max_x - left_pane.stitch_grid.width)
in
let best_offset = limit_rd_scroll ~last_page next_page in
{ view with x_off = best_offset }
let page_left view left_pane =
let prev_page = view.x_off - left_pane.stitch_grid.width in
let best_offset = limit_lu_scroll prev_page in
{ view with x_off = best_offset }
let page_down substrate view left_pane =
let next_page = view.y_off + left_pane.stitch_grid.height
and last_page = (substrate.max_y - left_pane.stitch_grid.height)
in
let best_offset = limit_rd_scroll ~last_page next_page in
{ view with y_off = best_offset }
let page_up view left_pane =
let prev_page = view.y_off - left_pane.stitch_grid.height in
let best_offset = limit_lu_scroll prev_page in
{ view with y_off = best_offset }
let scroll substrate view = function
| `Right -> { view with x_off = limit_rd_scroll ~last_page:(substrate.max_x - 1) (view.x_off + 1) }
| `Left -> { view with x_off = limit_lu_scroll (view.x_off - 1) }
| `Up -> { view with y_off = limit_lu_scroll (view.y_off - 1) }
| `Down -> { view with y_off = limit_rd_scroll ~last_page:(substrate.max_y - 1) (view.y_off + 1) }
let page substrate view left_pane = function
| `Right -> page_right substrate view left_pane
| `Left -> page_left view left_pane
| `Up -> page_up view left_pane
| `Down -> page_down substrate view left_pane
|
1e57b301655491e0fe4d68b6995494a9f4bde14d9ab16594a5b1e0d4b061a203 | typelead/etlas | Script.hs | -----------------------------------------------------------------------------
-- |
-- Module : Distribution.Simple.Program.Script
Copyright : 2009
--
-- Maintainer :
-- Portability : portable
--
-- This module provides an library interface to the @hc-pkg@ program.
Currently only GHC and LHC have hc - pkg programs .
module Distribution.Simple.Program.Script (
invocationAsSystemScript,
invocationAsShellScript,
invocationAsBatchFile,
) where
import Prelude ()
import Distribution.Compat.Prelude
import Distribution.Simple.Program.Run
import Distribution.System
| Generate a system script , either POSIX shell script or Windows batch file
-- as appropriate for the given system.
--
invocationAsSystemScript :: OS -> ProgramInvocation -> String
invocationAsSystemScript Windows = invocationAsBatchFile
invocationAsSystemScript _ = invocationAsShellScript
-- | Generate a POSIX shell script that invokes a program.
--
invocationAsShellScript :: ProgramInvocation -> String
invocationAsShellScript
ProgramInvocation {
progInvokePath = path,
progInvokeArgs = args,
progInvokeEnv = envExtra,
progInvokeCwd = mcwd,
progInvokeInput = minput
} = unlines $
[ "#!/bin/sh" ]
++ concatMap setEnv envExtra
++ [ "cd " ++ quote cwd | cwd <- maybeToList mcwd ]
++ [ (case minput of
Nothing -> ""
Just input -> "echo " ++ quote input ++ " | ")
++ unwords (map quote $ path : args) ++ " \"$@\""]
where
setEnv (var, Nothing) = ["unset " ++ var, "export " ++ var]
setEnv (var, Just val) = ["export " ++ var ++ "=" ++ quote val]
quote :: String -> String
quote s = "'" ++ escape s ++ "'"
escape [] = []
escape ('\'':cs) = "'\\''" ++ escape cs
escape (c :cs) = c : escape cs
| Generate a Windows batch file that invokes a program .
--
invocationAsBatchFile :: ProgramInvocation -> String
invocationAsBatchFile
ProgramInvocation {
progInvokePath = path,
progInvokeArgs = args,
progInvokeEnv = envExtra,
progInvokeCwd = mcwd,
progInvokeInput = minput
} = unlines $
[ "@echo off" ]
++ map setEnv envExtra
++ [ "cd \"" ++ cwd ++ "\"" | cwd <- maybeToList mcwd ]
++ case minput of
Nothing ->
[ path ++ concatMap (' ':) args ]
Just input ->
[ "(" ]
++ [ "echo " ++ escape line | line <- lines input ]
++ [ ") | "
++ "\"" ++ path ++ "\""
++ concatMap (\arg -> ' ':quote arg) args ]
where
setEnv (var, Nothing) = "set " ++ var ++ "="
setEnv (var, Just val) = "set " ++ var ++ "=" ++ escape val
quote :: String -> String
quote s = "\"" ++ escapeQ s ++ "\""
escapeQ [] = []
escapeQ ('"':cs) = "\"\"\"" ++ escapeQ cs
escapeQ (c :cs) = c : escapeQ cs
escape [] = []
escape ('|':cs) = "^|" ++ escape cs
escape ('<':cs) = "^<" ++ escape cs
escape ('>':cs) = "^>" ++ escape cs
escape ('&':cs) = "^&" ++ escape cs
escape ('(':cs) = "^(" ++ escape cs
escape (')':cs) = "^)" ++ escape cs
escape ('^':cs) = "^^" ++ escape cs
escape (c :cs) = c : escape cs
| null | https://raw.githubusercontent.com/typelead/etlas/bbd7c558169e1fda086e759e1a6f8c8ca2807583/etlas-cabal/Distribution/Simple/Program/Script.hs | haskell | ---------------------------------------------------------------------------
|
Module : Distribution.Simple.Program.Script
Maintainer :
Portability : portable
This module provides an library interface to the @hc-pkg@ program.
as appropriate for the given system.
| Generate a POSIX shell script that invokes a program.
| Copyright : 2009
Currently only GHC and LHC have hc - pkg programs .
module Distribution.Simple.Program.Script (
invocationAsSystemScript,
invocationAsShellScript,
invocationAsBatchFile,
) where
import Prelude ()
import Distribution.Compat.Prelude
import Distribution.Simple.Program.Run
import Distribution.System
| Generate a system script , either POSIX shell script or Windows batch file
invocationAsSystemScript :: OS -> ProgramInvocation -> String
invocationAsSystemScript Windows = invocationAsBatchFile
invocationAsSystemScript _ = invocationAsShellScript
invocationAsShellScript :: ProgramInvocation -> String
invocationAsShellScript
ProgramInvocation {
progInvokePath = path,
progInvokeArgs = args,
progInvokeEnv = envExtra,
progInvokeCwd = mcwd,
progInvokeInput = minput
} = unlines $
[ "#!/bin/sh" ]
++ concatMap setEnv envExtra
++ [ "cd " ++ quote cwd | cwd <- maybeToList mcwd ]
++ [ (case minput of
Nothing -> ""
Just input -> "echo " ++ quote input ++ " | ")
++ unwords (map quote $ path : args) ++ " \"$@\""]
where
setEnv (var, Nothing) = ["unset " ++ var, "export " ++ var]
setEnv (var, Just val) = ["export " ++ var ++ "=" ++ quote val]
quote :: String -> String
quote s = "'" ++ escape s ++ "'"
escape [] = []
escape ('\'':cs) = "'\\''" ++ escape cs
escape (c :cs) = c : escape cs
| Generate a Windows batch file that invokes a program .
invocationAsBatchFile :: ProgramInvocation -> String
invocationAsBatchFile
ProgramInvocation {
progInvokePath = path,
progInvokeArgs = args,
progInvokeEnv = envExtra,
progInvokeCwd = mcwd,
progInvokeInput = minput
} = unlines $
[ "@echo off" ]
++ map setEnv envExtra
++ [ "cd \"" ++ cwd ++ "\"" | cwd <- maybeToList mcwd ]
++ case minput of
Nothing ->
[ path ++ concatMap (' ':) args ]
Just input ->
[ "(" ]
++ [ "echo " ++ escape line | line <- lines input ]
++ [ ") | "
++ "\"" ++ path ++ "\""
++ concatMap (\arg -> ' ':quote arg) args ]
where
setEnv (var, Nothing) = "set " ++ var ++ "="
setEnv (var, Just val) = "set " ++ var ++ "=" ++ escape val
quote :: String -> String
quote s = "\"" ++ escapeQ s ++ "\""
escapeQ [] = []
escapeQ ('"':cs) = "\"\"\"" ++ escapeQ cs
escapeQ (c :cs) = c : escapeQ cs
escape [] = []
escape ('|':cs) = "^|" ++ escape cs
escape ('<':cs) = "^<" ++ escape cs
escape ('>':cs) = "^>" ++ escape cs
escape ('&':cs) = "^&" ++ escape cs
escape ('(':cs) = "^(" ++ escape cs
escape (')':cs) = "^)" ++ escape cs
escape ('^':cs) = "^^" ++ escape cs
escape (c :cs) = c : escape cs
|
c728a7bd5a8074dc9cc190a8deae6658b0729991a5e12dbeca55cb8019b68981 | clojure-interop/google-cloud-clients | Instance.clj | (ns com.google.cloud.spanner.Instance
"Represents a Cloud Spanner Instance. Instance adds a layer of service related
functionality over InstanceInfo."
(:refer-clojure :only [require comment defn ->])
(:import [com.google.cloud.spanner Instance]))
(defn reload
"returns: `com.google.cloud.spanner.Instance`"
(^com.google.cloud.spanner.Instance [^Instance this]
(-> this (.reload))))
(defn delete
""
([^Instance this]
(-> this (.delete))))
(defn update
"fields-to-update - `com.google.cloud.spanner.InstanceInfo$InstanceField`
returns: `com.google.api.gax.longrunning.OperationFuture<com.google.cloud.spanner.Instance,com.google.spanner.admin.instance.v1.UpdateInstanceMetadata>`"
(^com.google.api.gax.longrunning.OperationFuture [^Instance this ^com.google.cloud.spanner.InstanceInfo$InstanceField fields-to-update]
(-> this (.update fields-to-update))))
(defn list-databases
"returns: `com.google.api.gax.paging.Page<com.google.cloud.spanner.Database>`"
(^com.google.api.gax.paging.Page [^Instance this]
(-> this (.listDatabases))))
(defn get-database
"database-id - `java.lang.String`
returns: `com.google.cloud.spanner.Database`"
(^com.google.cloud.spanner.Database [^Instance this ^java.lang.String database-id]
(-> this (.getDatabase database-id))))
(defn create-database
"Creates a new database in this instance.
database-id - the id of the database which will be created. It must conform to the regular expression [a-z][a-z0-9_\\-]*[a-z0-9] and be between 2 and 30 characters in length - `java.lang.String`
statements - DDL statements to run while creating the database, for example CREATE TABLE MyTable ( ... ). This should not include CREATE DATABASE statement. - `java.lang.Iterable`
returns: `com.google.api.gax.longrunning.OperationFuture<com.google.cloud.spanner.Database,com.google.spanner.admin.database.v1.CreateDatabaseMetadata>`
throws: com.google.cloud.spanner.SpannerException"
(^com.google.api.gax.longrunning.OperationFuture [^Instance this ^java.lang.String database-id ^java.lang.Iterable statements]
(-> this (.createDatabase database-id statements))))
(defn to-builder
"returns: `com.google.cloud.spanner.Instance$Builder`"
(^com.google.cloud.spanner.Instance$Builder [^Instance this]
(-> this (.toBuilder))))
| null | https://raw.githubusercontent.com/clojure-interop/google-cloud-clients/80852d0496057c22f9cdc86d6f9ffc0fa3cd7904/com.google.cloud.spanner/src/com/google/cloud/spanner/Instance.clj | clojure | (ns com.google.cloud.spanner.Instance
"Represents a Cloud Spanner Instance. Instance adds a layer of service related
functionality over InstanceInfo."
(:refer-clojure :only [require comment defn ->])
(:import [com.google.cloud.spanner Instance]))
(defn reload
"returns: `com.google.cloud.spanner.Instance`"
(^com.google.cloud.spanner.Instance [^Instance this]
(-> this (.reload))))
(defn delete
""
([^Instance this]
(-> this (.delete))))
(defn update
"fields-to-update - `com.google.cloud.spanner.InstanceInfo$InstanceField`
returns: `com.google.api.gax.longrunning.OperationFuture<com.google.cloud.spanner.Instance,com.google.spanner.admin.instance.v1.UpdateInstanceMetadata>`"
(^com.google.api.gax.longrunning.OperationFuture [^Instance this ^com.google.cloud.spanner.InstanceInfo$InstanceField fields-to-update]
(-> this (.update fields-to-update))))
(defn list-databases
"returns: `com.google.api.gax.paging.Page<com.google.cloud.spanner.Database>`"
(^com.google.api.gax.paging.Page [^Instance this]
(-> this (.listDatabases))))
(defn get-database
"database-id - `java.lang.String`
returns: `com.google.cloud.spanner.Database`"
(^com.google.cloud.spanner.Database [^Instance this ^java.lang.String database-id]
(-> this (.getDatabase database-id))))
(defn create-database
"Creates a new database in this instance.
database-id - the id of the database which will be created. It must conform to the regular expression [a-z][a-z0-9_\\-]*[a-z0-9] and be between 2 and 30 characters in length - `java.lang.String`
statements - DDL statements to run while creating the database, for example CREATE TABLE MyTable ( ... ). This should not include CREATE DATABASE statement. - `java.lang.Iterable`
returns: `com.google.api.gax.longrunning.OperationFuture<com.google.cloud.spanner.Database,com.google.spanner.admin.database.v1.CreateDatabaseMetadata>`
throws: com.google.cloud.spanner.SpannerException"
(^com.google.api.gax.longrunning.OperationFuture [^Instance this ^java.lang.String database-id ^java.lang.Iterable statements]
(-> this (.createDatabase database-id statements))))
(defn to-builder
"returns: `com.google.cloud.spanner.Instance$Builder`"
(^com.google.cloud.spanner.Instance$Builder [^Instance this]
(-> this (.toBuilder))))
|
|
e7a8105d8db13abef1c6bc6a5ca2cdba579ac6b75147c7e8233dee2b01e5c492 | softwarelanguageslab/maf | R5RS_WeiChenRompf2019_the-little-schemer_ch2-2.scm | ; Changes:
* removed : 0
* added : 0
* swaps : 1
; * negated predicates: 0
* swapped branches : 1
; * calls to id fun: 0
(letrec ((atom? (lambda (x)
(if (not (pair? x))
(<change>
(not (null? x))
#f)
(<change>
#f
(not (null? x))))))
(lat? (lambda (l)
(if (null? l)
#t
(if (atom? (car l)) (lat? (cdr l)) #f)))))
(lat?
(__toplevel_cons
'Jack
(__toplevel_cons
'Sprat
(__toplevel_cons
'could
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ()))))))))
(lat?
(__toplevel_cons
(__toplevel_cons 'Jack ())
(__toplevel_cons
'Sprat
(__toplevel_cons
'could
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ()))))))))
(<change>
(lat?
(__toplevel_cons
'Jack
(__toplevel_cons
(__toplevel_cons 'Sprat (__toplevel_cons 'could ()))
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ())))))))
(lat? ()))
(<change>
(lat? ())
(lat?
(__toplevel_cons
'Jack
(__toplevel_cons
(__toplevel_cons 'Sprat (__toplevel_cons 'could ()))
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ()))))))))) | null | https://raw.githubusercontent.com/softwarelanguageslab/maf/11acedf56b9bf0c8e55ddb6aea754b6766d8bb40/test/changes/scheme/generated/R5RS_WeiChenRompf2019_the-little-schemer_ch2-2.scm | scheme | Changes:
* negated predicates: 0
* calls to id fun: 0 | * removed : 0
* added : 0
* swaps : 1
* swapped branches : 1
(letrec ((atom? (lambda (x)
(if (not (pair? x))
(<change>
(not (null? x))
#f)
(<change>
#f
(not (null? x))))))
(lat? (lambda (l)
(if (null? l)
#t
(if (atom? (car l)) (lat? (cdr l)) #f)))))
(lat?
(__toplevel_cons
'Jack
(__toplevel_cons
'Sprat
(__toplevel_cons
'could
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ()))))))))
(lat?
(__toplevel_cons
(__toplevel_cons 'Jack ())
(__toplevel_cons
'Sprat
(__toplevel_cons
'could
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ()))))))))
(<change>
(lat?
(__toplevel_cons
'Jack
(__toplevel_cons
(__toplevel_cons 'Sprat (__toplevel_cons 'could ()))
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ())))))))
(lat? ()))
(<change>
(lat? ())
(lat?
(__toplevel_cons
'Jack
(__toplevel_cons
(__toplevel_cons 'Sprat (__toplevel_cons 'could ()))
(__toplevel_cons 'eat (__toplevel_cons 'no (__toplevel_cons 'chicken (__toplevel_cons 'fat ()))))))))) |
910de451c3dd72d2cd6eb51b5bf6c971fda724b7fef3da7f55c72a47d24f04fd | manuel-serrano/bigloo | date.scm | ;*=====================================================================*/
* serrano / prgm / project / bigloo / api / web / src / Llib / date.scm * /
;* ------------------------------------------------------------- */
* Author : * /
* Creation : Tue Jul 22 08:06:43 2008 * /
* Last change : Tue Mar 22 08:20:59 2011 ( serrano ) * /
* Copyright : 2008 - 11 * /
;* ------------------------------------------------------------- */
* W3C dates ( i.e. , ISO8601 date format described in RFC 3339 ) . * /
;* See: */
;* -datetime-19980827 */
;* */
;*=====================================================================*/
;*---------------------------------------------------------------------*/
;* The module */
;*---------------------------------------------------------------------*/
(module __web_date
(export (w3c-datetime-timestamp::bstring)
(w3c-datetime->date::date ::bstring)
(date->w3c-datetime::bstring ::date)))
;*---------------------------------------------------------------------*/
;* w3c-datetime-timestamp ... */
;*---------------------------------------------------------------------*/
(define (w3c-datetime-timestamp)
(date->w3c-datetime (current-date)))
;*---------------------------------------------------------------------*/
;* w3c-datetime-parse ... */
;*---------------------------------------------------------------------*/
(define (w3c-datetime-parse string)
;; -datetime
(let ((ip (open-input-string string)))
(define date-grammar
(regular-grammar ((2d (= 2 digit)) (4d (= 4 digit)))
((: 4d "-" 2d "-" 2d "T") ; Date followed by time
(let ((d1 (the-substring 0 4))
(d2 (the-substring 5 7))
(d3 (the-substring 8 10)))
(cons* d1 d2 d3 (read/rp time-grammar ip))))
((: 4d "-" 2d "-" 2d) ; Full date without time
(list (the-substring 0 4) (the-substring 5 7) (the-substring 8 10)))
Year and month
(list (the-substring 0 4) (the-substring 5 7)))
((: 4d) ; Year
(list (the-substring 0 4)))
(else
(error 'w3c-datetine-parse
"Invalid (ISO-8601:2000/W3C-NOTE-datetime) format"
string))))
(define time-grammar
; At this point, the 'T' character has been read by time-grammar
(regular-grammar ((2d (= 2 digit)))
((: 2d ":" 2d ":" 2d (in ",.") (+ digit)) ; Full time
decimal fraction is ignored as ignores it too
(let ((d1 (the-substring 0 2))
(d2 (the-substring 3 5))
(d3 (the-substring 6 8)))
(cons* d1 d2 d3 (read/rp tz-grammar ip))))
((: 2d ":" 2d ":" 2d) ; Full time, without decimal fraction
(let ((d1 (the-substring 0 2))
(d2 (the-substring 3 5))
(d3 (the-substring 6 8)))
(cons* d1 d2 d3 (read/rp tz-grammar ip))))
((: 2d ":" 2d) ; Time without seconds
(let ((d1 (the-substring 0 2))
(d2 (the-substring 3 5)))
(cons* d1 d2 (read/rp tz-grammar ip))))
(else
(error 'w3c-datetine-parse
"Invalid (ISO-8601:2000/W3C-NOTE-datetime) format"
string))))
(define tz-grammar
(regular-grammar ((2d (= 2 digit)))
("Z" '()) ; UTC Timezone
((: (in "-+") 2d ":" 2d) ; Other timezones
(let ((s (if (string=? (the-substring 0 1) "+") 1 -1))
(h (string->number (the-substring 1 3)))
(m (string->number (the-substring 4 6))))
(list (number->string (* s 60 (+ m (* h 60)))))))
(else
(error 'w3c-datetine-parse
"Invalid (ISO-8601:2000/W3C-NOTE-datetime) format"
string))))
(unwind-protect
(read/rp date-grammar ip)
(close-input-port ip))))
;*---------------------------------------------------------------------*/
;* w3c-datetime->date ... */
;*---------------------------------------------------------------------*/
(define (w3c-datetime->date string)
(let loop ((merge '())
(l1 (w3c-datetime-parse string))
(l2 '(year: month: day: hour: min: sec: timezone:)))
(cond
((or (null? l1) (null? l2))
(apply make-date (reverse! merge)))
(else
(if (car l1)
(loop (cons* (string->number (car l1)) (car l2) merge)
(cdr l1)
(cdr l2))
(loop merge (cdr l1) (cdr l2)))))))
;*---------------------------------------------------------------------*/
;* date->w3c-datetime ... */
;*---------------------------------------------------------------------*/
(define (date->w3c-datetime date)
(let ((yr (integer->string (date-year date)))
(mo (integer->string/padding (date-month date) 2))
(dy (integer->string/padding (date-day date) 2))
(hr (integer->string/padding (date-hour date) 2))
(mi (integer->string/padding (date-minute date) 2))
(se (integer->string/padding (date-second date) 2))
(dst (let ((dst (date-is-dst date))) (if (< dst 0) 0 dst)))
(tz (date-timezone date)))
(format "~a-~a-~aT~a:~a:~a~a"
yr mo dy hr mi se
(let ((h (integer->string/padding
(+ dst (quotient (abs tz) 3600)) 2))
(m (integer->string/padding
(quotient (remainder (abs tz) 3600) 60) 2)))
WARNING : mismatch between on timezones between dates
and iso8601 !
It seems that considers the local timezone as a
;; reference with a timezome representing the shift from
the localtime to UTC . For instance , for Paris ( CET ) , the
Bigloo timezone is -3600 . On the other hand , iso8601
;; considers the sun as the center of the galaxy, hence UTC
;; is the temporal reference. Hence, the shift is not *to*
UTC but * from * UTC . So , for instance , for Paris , the shift
is +01:00 .
(cond
((< tz 0) (format "+~a:~a" h m))
((> tz 0) (format "-~a:~a" h m))
(else #\Z))))))
| null | https://raw.githubusercontent.com/manuel-serrano/bigloo/eb650ed4429155f795a32465e009706bbf1b8d74/api/web/src/Llib/date.scm | scheme | *=====================================================================*/
* ------------------------------------------------------------- */
* ------------------------------------------------------------- */
* See: */
* -datetime-19980827 */
* */
*=====================================================================*/
*---------------------------------------------------------------------*/
* The module */
*---------------------------------------------------------------------*/
*---------------------------------------------------------------------*/
* w3c-datetime-timestamp ... */
*---------------------------------------------------------------------*/
*---------------------------------------------------------------------*/
* w3c-datetime-parse ... */
*---------------------------------------------------------------------*/
-datetime
Date followed by time
Full date without time
Year
At this point, the 'T' character has been read by time-grammar
Full time
Full time, without decimal fraction
Time without seconds
UTC Timezone
Other timezones
*---------------------------------------------------------------------*/
* w3c-datetime->date ... */
*---------------------------------------------------------------------*/
*---------------------------------------------------------------------*/
* date->w3c-datetime ... */
*---------------------------------------------------------------------*/
reference with a timezome representing the shift from
considers the sun as the center of the galaxy, hence UTC
is the temporal reference. Hence, the shift is not *to* | * serrano / prgm / project / bigloo / api / web / src / Llib / date.scm * /
* Author : * /
* Creation : Tue Jul 22 08:06:43 2008 * /
* Last change : Tue Mar 22 08:20:59 2011 ( serrano ) * /
* Copyright : 2008 - 11 * /
* W3C dates ( i.e. , ISO8601 date format described in RFC 3339 ) . * /
(module __web_date
(export (w3c-datetime-timestamp::bstring)
(w3c-datetime->date::date ::bstring)
(date->w3c-datetime::bstring ::date)))
(define (w3c-datetime-timestamp)
(date->w3c-datetime (current-date)))
(define (w3c-datetime-parse string)
(let ((ip (open-input-string string)))
(define date-grammar
(regular-grammar ((2d (= 2 digit)) (4d (= 4 digit)))
(let ((d1 (the-substring 0 4))
(d2 (the-substring 5 7))
(d3 (the-substring 8 10)))
(cons* d1 d2 d3 (read/rp time-grammar ip))))
(list (the-substring 0 4) (the-substring 5 7) (the-substring 8 10)))
Year and month
(list (the-substring 0 4) (the-substring 5 7)))
(list (the-substring 0 4)))
(else
(error 'w3c-datetine-parse
"Invalid (ISO-8601:2000/W3C-NOTE-datetime) format"
string))))
(define time-grammar
(regular-grammar ((2d (= 2 digit)))
decimal fraction is ignored as ignores it too
(let ((d1 (the-substring 0 2))
(d2 (the-substring 3 5))
(d3 (the-substring 6 8)))
(cons* d1 d2 d3 (read/rp tz-grammar ip))))
(let ((d1 (the-substring 0 2))
(d2 (the-substring 3 5))
(d3 (the-substring 6 8)))
(cons* d1 d2 d3 (read/rp tz-grammar ip))))
(let ((d1 (the-substring 0 2))
(d2 (the-substring 3 5)))
(cons* d1 d2 (read/rp tz-grammar ip))))
(else
(error 'w3c-datetine-parse
"Invalid (ISO-8601:2000/W3C-NOTE-datetime) format"
string))))
(define tz-grammar
(regular-grammar ((2d (= 2 digit)))
(let ((s (if (string=? (the-substring 0 1) "+") 1 -1))
(h (string->number (the-substring 1 3)))
(m (string->number (the-substring 4 6))))
(list (number->string (* s 60 (+ m (* h 60)))))))
(else
(error 'w3c-datetine-parse
"Invalid (ISO-8601:2000/W3C-NOTE-datetime) format"
string))))
(unwind-protect
(read/rp date-grammar ip)
(close-input-port ip))))
(define (w3c-datetime->date string)
(let loop ((merge '())
(l1 (w3c-datetime-parse string))
(l2 '(year: month: day: hour: min: sec: timezone:)))
(cond
((or (null? l1) (null? l2))
(apply make-date (reverse! merge)))
(else
(if (car l1)
(loop (cons* (string->number (car l1)) (car l2) merge)
(cdr l1)
(cdr l2))
(loop merge (cdr l1) (cdr l2)))))))
(define (date->w3c-datetime date)
(let ((yr (integer->string (date-year date)))
(mo (integer->string/padding (date-month date) 2))
(dy (integer->string/padding (date-day date) 2))
(hr (integer->string/padding (date-hour date) 2))
(mi (integer->string/padding (date-minute date) 2))
(se (integer->string/padding (date-second date) 2))
(dst (let ((dst (date-is-dst date))) (if (< dst 0) 0 dst)))
(tz (date-timezone date)))
(format "~a-~a-~aT~a:~a:~a~a"
yr mo dy hr mi se
(let ((h (integer->string/padding
(+ dst (quotient (abs tz) 3600)) 2))
(m (integer->string/padding
(quotient (remainder (abs tz) 3600) 60) 2)))
WARNING : mismatch between on timezones between dates
and iso8601 !
It seems that considers the local timezone as a
the localtime to UTC . For instance , for Paris ( CET ) , the
Bigloo timezone is -3600 . On the other hand , iso8601
UTC but * from * UTC . So , for instance , for Paris , the shift
is +01:00 .
(cond
((< tz 0) (format "+~a:~a" h m))
((> tz 0) (format "-~a:~a" h m))
(else #\Z))))))
|
1b10f45c769952f82ba7a55cb4ee772d7c5d4f9fa860efe6ddd77a7f7ef2ba25 | ghosthamlet/algorithm-data-structure | binary_search_tree_test.clj | (ns algorithm-data-structure.data-structures.binary-search-tree-test
(:require [algorithm-data-structure.data-structures.binary-search-tree :as bst]
[algorithm-data-structure.data-structures.hash-table :as ht]
[clojure.test :refer :all]))
(def tree (bst/create))
(deftest create-test
(is (= {:root {:left nil
:right nil
:has-parent true
:value nil
:meta (ht/create)}}
(bst/create))))
(deftest uncle-test
(is (= {:left nil
:right {:left nil
:right nil
:has-parent true
:value 0.5
:meta (ht/create)}
:has-parent true
:value 0
:meta (ht/create)}
(-> tree
(bst/insert 1)
(bst/insert 0)
(bst/insert 2)
(bst/insert 3)
(bst/insert 0.5)
(bst/uncle {:left nil
:right nil
:has-parent true
:value 3
:meta (ht/create)})))))
| null | https://raw.githubusercontent.com/ghosthamlet/algorithm-data-structure/017f41a79d8b1d62ff5a6cceffa1b0f0ad3ead6b/test/algorithm_data_structure/data_structures/binary_search_tree_test.clj | clojure | (ns algorithm-data-structure.data-structures.binary-search-tree-test
(:require [algorithm-data-structure.data-structures.binary-search-tree :as bst]
[algorithm-data-structure.data-structures.hash-table :as ht]
[clojure.test :refer :all]))
(def tree (bst/create))
(deftest create-test
(is (= {:root {:left nil
:right nil
:has-parent true
:value nil
:meta (ht/create)}}
(bst/create))))
(deftest uncle-test
(is (= {:left nil
:right {:left nil
:right nil
:has-parent true
:value 0.5
:meta (ht/create)}
:has-parent true
:value 0
:meta (ht/create)}
(-> tree
(bst/insert 1)
(bst/insert 0)
(bst/insert 2)
(bst/insert 3)
(bst/insert 0.5)
(bst/uncle {:left nil
:right nil
:has-parent true
:value 3
:meta (ht/create)})))))
|
|
6fea15cc599f4409b97485208f380a5ec8aa4c060423ec2115a299d2e6034954 | lehins/massiv | FuseSeq.hs | # LANGUAGE FlexibleContexts #
{-# LANGUAGE GADTs #-}
module Main where
import Bench
import Bench.Massiv as A
import Criterion.Main
import Data.Array.Repa as R
import Data.Vector.Unboxed as VU
import Prelude as P
main :: IO ()
main = do
let t2 = (1600, 1200) :: (Int, Int)
defaultMain
[ bgroup
"map (+25)"
[ env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf (computeAs U . A.map (+ 25) . arrDLightIx2 Seq)
)
, env
(return (tupleToIx2T t2))
( bench "Array Ix2T U"
. whnf (computeAs U . A.map (+ 25) . arrDLightIx2T Seq)
)
, env (return t2) (bench "Vector U" . whnf (VU.map (+ 25) . vecLight2))
, env
(return (tupleToSh2 t2))
( bench "Repa DIM2 U"
. whnf (R.computeUnboxedS . R.map (+ 25) . arrDLightSh2)
)
]
, bgroup
"zipWith (*) . map (+25)"
[ env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf
( \sz ->
let a = A.map (+ 25) $ arrDLightIx2 Seq sz
in computeAs U $ A.zipWith (*) a a
)
)
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 U (compute intermediate)"
. whnf
( \sz ->
let a = computeAs U $ A.map (+ 25) $ arrDLightIx2 Seq sz
in computeAs U $ A.zipWith (*) a a
)
)
, env
(return t2)
( bench "Vector U"
. whnf
( \sz ->
let v = VU.map (+ 25) $ vecLight2 sz
in VU.zipWith (*) v v
)
)
, env
(return (tupleToSh2 t2))
( bench "Repa DIM2 U"
. whnf
( \sz ->
let a = R.map (+ 25) $ arrDLightSh2 sz
in R.computeUnboxedS $ R.zipWith (*) a a
)
)
]
, bgroup
"transpose"
[ env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf (computeAs U . A.transpose . arrDLightIx2 Seq)
)
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 Inner"
. whnf (computeAs U . A.transposeInner . arrDLightIx2 Seq)
)
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 Outer"
. whnf (computeAs U . A.transposeOuter . arrDLightIx2 Seq)
)
, env
(return (tupleToSh2 t2))
( bench "Repa DIM2 U"
. whnf (R.computeUnboxedS . R.transpose . arrDLightSh2)
)
]
, bgroup
"append"
[ env
(return t2)
(bench "Vector U" . whnf (\sz -> vecLight2 sz VU.++ vecLight2 sz))
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf
( \sz ->
computeAs U $
A.append' 1 (arrDLightIx2 Seq sz) (arrDLightIx2 Seq sz)
)
)
, env
(return (tupleToSh2 t2))
( bench "Repa Ix2 U"
. whnf
( \sz ->
R.computeUnboxedS $
R.append (arrDLightSh2 sz) (arrDLightSh2 sz)
)
)
]
]
| null | https://raw.githubusercontent.com/lehins/massiv/67a920d4403f210d0bfdad1acc4bec208d80a588/massiv-bench/bench/FuseSeq.hs | haskell | # LANGUAGE GADTs # | # LANGUAGE FlexibleContexts #
module Main where
import Bench
import Bench.Massiv as A
import Criterion.Main
import Data.Array.Repa as R
import Data.Vector.Unboxed as VU
import Prelude as P
main :: IO ()
main = do
let t2 = (1600, 1200) :: (Int, Int)
defaultMain
[ bgroup
"map (+25)"
[ env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf (computeAs U . A.map (+ 25) . arrDLightIx2 Seq)
)
, env
(return (tupleToIx2T t2))
( bench "Array Ix2T U"
. whnf (computeAs U . A.map (+ 25) . arrDLightIx2T Seq)
)
, env (return t2) (bench "Vector U" . whnf (VU.map (+ 25) . vecLight2))
, env
(return (tupleToSh2 t2))
( bench "Repa DIM2 U"
. whnf (R.computeUnboxedS . R.map (+ 25) . arrDLightSh2)
)
]
, bgroup
"zipWith (*) . map (+25)"
[ env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf
( \sz ->
let a = A.map (+ 25) $ arrDLightIx2 Seq sz
in computeAs U $ A.zipWith (*) a a
)
)
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 U (compute intermediate)"
. whnf
( \sz ->
let a = computeAs U $ A.map (+ 25) $ arrDLightIx2 Seq sz
in computeAs U $ A.zipWith (*) a a
)
)
, env
(return t2)
( bench "Vector U"
. whnf
( \sz ->
let v = VU.map (+ 25) $ vecLight2 sz
in VU.zipWith (*) v v
)
)
, env
(return (tupleToSh2 t2))
( bench "Repa DIM2 U"
. whnf
( \sz ->
let a = R.map (+ 25) $ arrDLightSh2 sz
in R.computeUnboxedS $ R.zipWith (*) a a
)
)
]
, bgroup
"transpose"
[ env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf (computeAs U . A.transpose . arrDLightIx2 Seq)
)
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 Inner"
. whnf (computeAs U . A.transposeInner . arrDLightIx2 Seq)
)
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 Outer"
. whnf (computeAs U . A.transposeOuter . arrDLightIx2 Seq)
)
, env
(return (tupleToSh2 t2))
( bench "Repa DIM2 U"
. whnf (R.computeUnboxedS . R.transpose . arrDLightSh2)
)
]
, bgroup
"append"
[ env
(return t2)
(bench "Vector U" . whnf (\sz -> vecLight2 sz VU.++ vecLight2 sz))
, env
(return (tupleToIx2 t2))
( bench "Array Ix2 U"
. whnf
( \sz ->
computeAs U $
A.append' 1 (arrDLightIx2 Seq sz) (arrDLightIx2 Seq sz)
)
)
, env
(return (tupleToSh2 t2))
( bench "Repa Ix2 U"
. whnf
( \sz ->
R.computeUnboxedS $
R.append (arrDLightSh2 sz) (arrDLightSh2 sz)
)
)
]
]
|
3d32332dddd05301edafbb4a73b4010c26a42b66e50b1f125cbda1e17979f6c8 | 40ants/reblocks-ui | heroku-compile.lisp | (declaim (optimize (debug 3)))
(ql:quickload :qlot)
(format t "*load-truename*: ~S~%"
*load-truename*)
(let* ((raw-qlfile (uiop:merge-pathnames* #P"qlfile"
(uiop:pathname-directory-pathname
*load-truename*)))
(qlfile (probe-file raw-qlfile)))
(format t "qlfile: ~S~%"
raw-qlfile)
(unless qlfile
(format t "qlfile not found!~%"))
(qlot/install:install-qlfile qlfile)
(qlot:with-local-quicklisp (qlfile)
(push "./" asdf:*central-registry*)
(ql:quickload "reblocks-ui-docs/index")
(ql:quickload "reblocks/doc/example-server")
;; These modules are required because sources will not be available at runtime
on and SLYNK will die tryng to do asdf : load - system unless we preload
;; these systems into the Lisp image
(ql:quickload '(:slynk/arglists
:slynk/mrepl
:slynk/fancy-inspector
:slynk/package-fu
:slynk/trace-dialog
:slynk/stickers
:slynk/indentation))))
| null | https://raw.githubusercontent.com/40ants/reblocks-ui/6c91a6d51130bea801b879e2f267cf539ba84cc9/heroku-compile.lisp | lisp | These modules are required because sources will not be available at runtime
these systems into the Lisp image | (declaim (optimize (debug 3)))
(ql:quickload :qlot)
(format t "*load-truename*: ~S~%"
*load-truename*)
(let* ((raw-qlfile (uiop:merge-pathnames* #P"qlfile"
(uiop:pathname-directory-pathname
*load-truename*)))
(qlfile (probe-file raw-qlfile)))
(format t "qlfile: ~S~%"
raw-qlfile)
(unless qlfile
(format t "qlfile not found!~%"))
(qlot/install:install-qlfile qlfile)
(qlot:with-local-quicklisp (qlfile)
(push "./" asdf:*central-registry*)
(ql:quickload "reblocks-ui-docs/index")
(ql:quickload "reblocks/doc/example-server")
on and SLYNK will die tryng to do asdf : load - system unless we preload
(ql:quickload '(:slynk/arglists
:slynk/mrepl
:slynk/fancy-inspector
:slynk/package-fu
:slynk/trace-dialog
:slynk/stickers
:slynk/indentation))))
|
425e85f76f25236961cd9c04d9e673b37b6eb6b9ab8a548b5722ddd64841f80b | johanatan/speako | schema.cljs | Copyright ( c ) 2016
(ns speako.schema
(:require [cljs.nodejs :as node]
[cljs.core.match :refer-macros [match]]
[speako.common :as common]
[instaparse.core :as insta]))
(def fs (node/require "fs"))
(def ^:private grammar
"<S> = TYPE+
TYPE = <WS> (OBJECT | UNION | ENUM) <WS>
<OBJECT> = TYPE_KEYWORD <RWS> IDENTIFIER <WS> <'{'> FIELD+ <WS> <'}'>
TYPE_KEYWORD = 'type'
RWS = #'\\s+'
WS = #'\\s*'
IDENTIFIER = #'[a-zA-Z0-9_]+'
FIELD = <WS> IDENTIFIER <WS> <':'> <WS> (LIST | DATATYPE) [NOTNULL]
LIST = <'['> DATATYPE <']'>
NOTNULL = <'!'>
DATATYPE = 'ID' | 'Boolean' | 'String' | 'Float' | 'Int' | IDENTIFIER
<UNION> = UNION_KEYWORD <RWS> IDENTIFIER <WS> <'='> <WS> IDENTIFIER <WS> OR_CLAUSE+
UNION_KEYWORD = 'union'
<OR_CLAUSE> = <'|'> <WS> IDENTIFIER <WS>
<ENUM> = ENUM_KEYWORD <RWS> IDENTIFIER <WS> <'{'> <WS> ENUM_VAL COMMA_ENUM_VAL+ <WS> <'}'>
ENUM_KEYWORD = 'enum'
ENUM_VAL = IDENTIFIER
<COMMA_ENUM_VAL> = <WS> <','> <WS> ENUM_VAL")
(node/enable-util-print!)
(def ^:private type-language-parser (insta/parser grammar :output-format :enlive))
(defn- extract-content [m] (get m :content))
(defn- extract-single-content [m] (common/single (extract-content m)))
(defn- extract-field-descriptors [parsed]
(assert (= :FIELD (get parsed :tag)))
(let [content (extract-content parsed)
[field-comp type-comp & not-null-comp] content
fieldname (extract-single-content field-comp)
is-list? (= :LIST (get type-comp :tag))
dt-content (extract-single-content ((if is-list? extract-single-content identity) type-comp))
datatype (extract-single-content dt-content)
is-not-null? (= 1 (count not-null-comp))]
[fieldname datatype is-list? is-not-null?]))
(defn- get-object-descriptors [parsed]
(assert (= {:tag :TYPE_KEYWORD :content (list "type")} (first parsed)))
(let [[_ typename-comp & field-comps] parsed
typename (extract-single-content typename-comp)
field-descriptors (map extract-field-descriptors field-comps)]
(assert (not= typename "Timestamp") "Timestamp is a reserved entity provided by speako.")
[typename field-descriptors]))
(defn- get-union-descriptors [parsed]
(assert (= {:tag :UNION_KEYWORD :content (list "union")} (first parsed)))
(let [typename (extract-single-content (second parsed))
constituents (map #(extract-single-content %) (drop 2 parsed))]
[typename constituents]))
(defn- get-enum-descriptors [parsed]
(assert (= {:tag :ENUM_KEYWORD, :content (list "enum")}) (first parsed))
(let [typename (extract-single-content (second parsed))
values (map-indexed (fn [i p]
{(extract-single-content (extract-single-content p))
{:value (+ 1 i)}}) (drop 2 parsed))]
[typename values]))
(defprotocol TypeConsumer
(consume-object [this typename field-descriptors])
(consume-union [this typename constituents])
(consume-enum [this typename constituents])
(finished [this]))
(defmulti load-schema #(.existsSync fs %))
(defmethod load-schema true [filename & consumers]
(apply load-schema (.toString (.readFileSync fs filename)) consumers))
(defmethod load-schema false [schema-str & consumers]
(let [parsed (type-language-parser schema-str)]
(common/dbg-banner-print "Parsed: %s" parsed)
(doseq [p parsed]
(assert (= :TYPE (get p :tag)) (common/format "Expected :TYPE. Actual: %s. Parsed: %s" (get p :tag) p))
(let [content (extract-content p)
[impl descriptors] (match [(get (first content) :tag)]
[:UNION_KEYWORD] [consume-union (get-union-descriptors content)]
[:TYPE_KEYWORD] [consume-object (get-object-descriptors content)]
[:ENUM_KEYWORD] [consume-enum (get-enum-descriptors content)])]
(doseq [consumer consumers]
(apply (partial impl consumer) descriptors))))
[schema-str (doall (map finished consumers))]))
| null | https://raw.githubusercontent.com/johanatan/speako/0d542aaaf1dcf73d84250c325529a51668284722/src/speako/schema.cljs | clojure | Copyright ( c ) 2016
(ns speako.schema
(:require [cljs.nodejs :as node]
[cljs.core.match :refer-macros [match]]
[speako.common :as common]
[instaparse.core :as insta]))
(def fs (node/require "fs"))
(def ^:private grammar
"<S> = TYPE+
TYPE = <WS> (OBJECT | UNION | ENUM) <WS>
<OBJECT> = TYPE_KEYWORD <RWS> IDENTIFIER <WS> <'{'> FIELD+ <WS> <'}'>
TYPE_KEYWORD = 'type'
RWS = #'\\s+'
WS = #'\\s*'
IDENTIFIER = #'[a-zA-Z0-9_]+'
FIELD = <WS> IDENTIFIER <WS> <':'> <WS> (LIST | DATATYPE) [NOTNULL]
LIST = <'['> DATATYPE <']'>
NOTNULL = <'!'>
DATATYPE = 'ID' | 'Boolean' | 'String' | 'Float' | 'Int' | IDENTIFIER
<UNION> = UNION_KEYWORD <RWS> IDENTIFIER <WS> <'='> <WS> IDENTIFIER <WS> OR_CLAUSE+
UNION_KEYWORD = 'union'
<OR_CLAUSE> = <'|'> <WS> IDENTIFIER <WS>
<ENUM> = ENUM_KEYWORD <RWS> IDENTIFIER <WS> <'{'> <WS> ENUM_VAL COMMA_ENUM_VAL+ <WS> <'}'>
ENUM_KEYWORD = 'enum'
ENUM_VAL = IDENTIFIER
<COMMA_ENUM_VAL> = <WS> <','> <WS> ENUM_VAL")
(node/enable-util-print!)
(def ^:private type-language-parser (insta/parser grammar :output-format :enlive))
(defn- extract-content [m] (get m :content))
(defn- extract-single-content [m] (common/single (extract-content m)))
(defn- extract-field-descriptors [parsed]
(assert (= :FIELD (get parsed :tag)))
(let [content (extract-content parsed)
[field-comp type-comp & not-null-comp] content
fieldname (extract-single-content field-comp)
is-list? (= :LIST (get type-comp :tag))
dt-content (extract-single-content ((if is-list? extract-single-content identity) type-comp))
datatype (extract-single-content dt-content)
is-not-null? (= 1 (count not-null-comp))]
[fieldname datatype is-list? is-not-null?]))
(defn- get-object-descriptors [parsed]
(assert (= {:tag :TYPE_KEYWORD :content (list "type")} (first parsed)))
(let [[_ typename-comp & field-comps] parsed
typename (extract-single-content typename-comp)
field-descriptors (map extract-field-descriptors field-comps)]
(assert (not= typename "Timestamp") "Timestamp is a reserved entity provided by speako.")
[typename field-descriptors]))
(defn- get-union-descriptors [parsed]
(assert (= {:tag :UNION_KEYWORD :content (list "union")} (first parsed)))
(let [typename (extract-single-content (second parsed))
constituents (map #(extract-single-content %) (drop 2 parsed))]
[typename constituents]))
(defn- get-enum-descriptors [parsed]
(assert (= {:tag :ENUM_KEYWORD, :content (list "enum")}) (first parsed))
(let [typename (extract-single-content (second parsed))
values (map-indexed (fn [i p]
{(extract-single-content (extract-single-content p))
{:value (+ 1 i)}}) (drop 2 parsed))]
[typename values]))
(defprotocol TypeConsumer
(consume-object [this typename field-descriptors])
(consume-union [this typename constituents])
(consume-enum [this typename constituents])
(finished [this]))
(defmulti load-schema #(.existsSync fs %))
(defmethod load-schema true [filename & consumers]
(apply load-schema (.toString (.readFileSync fs filename)) consumers))
(defmethod load-schema false [schema-str & consumers]
(let [parsed (type-language-parser schema-str)]
(common/dbg-banner-print "Parsed: %s" parsed)
(doseq [p parsed]
(assert (= :TYPE (get p :tag)) (common/format "Expected :TYPE. Actual: %s. Parsed: %s" (get p :tag) p))
(let [content (extract-content p)
[impl descriptors] (match [(get (first content) :tag)]
[:UNION_KEYWORD] [consume-union (get-union-descriptors content)]
[:TYPE_KEYWORD] [consume-object (get-object-descriptors content)]
[:ENUM_KEYWORD] [consume-enum (get-enum-descriptors content)])]
(doseq [consumer consumers]
(apply (partial impl consumer) descriptors))))
[schema-str (doall (map finished consumers))]))
|
|
6bfea5a7f72cfce820311710455e46b5618e1f782059938b8e5f3ee72fda3924 | ahrefs/atd | test2j.expected.ml | (* Auto-generated from "test2.atd" *)
[@@@ocaml.warning "-27-32-33-35-39"]
open Test
open Test2
open Testj
let write_poly write__aa write__bb = (
Testj.write_poly write__aa write__bb
)
let string_of_poly write__aa write__bb ?(len = 1024) x =
let ob = Buffer.create len in
write_poly write__aa write__bb ob x;
Buffer.contents ob
let read_poly read__aa read__bb = (
Testj.read_poly read__aa read__bb
)
let poly_of_string read__aa read__bb s =
read_poly read__aa read__bb (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__int_int_poly = (
write_poly Yojson.Safe.write_int Yojson.Safe.write_int
)
let string_of__int_int_poly ?(len = 1024) x =
let ob = Buffer.create len in
write__int_int_poly ob x;
Buffer.contents ob
let read__int_int_poly = (
read_poly Atdgen_runtime.Oj_run.read_int Atdgen_runtime.Oj_run.read_int
)
let _int_int_poly_of_string s =
read__int_int_poly (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write_poly_int2 = (
write__int_int_poly
)
let string_of_poly_int2 ?(len = 1024) x =
let ob = Buffer.create len in
write_poly_int2 ob x;
Buffer.contents ob
let read_poly_int2 = (
read__int_int_poly
)
let poly_int2_of_string s =
read_poly_int2 (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__string_option = (
Atdgen_runtime.Oj_run.write_std_option (
Yojson.Safe.write_string
)
)
let string_of__string_option ?(len = 1024) x =
let ob = Buffer.create len in
write__string_option ob x;
Buffer.contents ob
let read__string_option = (
fun p lb ->
Yojson.Safe.read_space p lb;
match Yojson.Safe.start_any_variant p lb with
| `Edgy_bracket -> (
match Yojson.Safe.read_ident p lb with
| "None" ->
Yojson.Safe.read_space p lb;
Yojson.Safe.read_gt p lb;
(None : _ option)
| "Some" ->
Atdgen_runtime.Oj_run.read_until_field_value p lb;
let x = (
Atdgen_runtime.Oj_run.read_string
) p lb
in
Yojson.Safe.read_space p lb;
Yojson.Safe.read_gt p lb;
(Some x : _ option)
| x ->
Atdgen_runtime.Oj_run.invalid_variant_tag p x
)
| `Double_quote -> (
match Yojson.Safe.finish_string p lb with
| "None" ->
(None : _ option)
| x ->
Atdgen_runtime.Oj_run.invalid_variant_tag p x
)
| `Square_bracket -> (
match Atdgen_runtime.Oj_run.read_string p lb with
| "Some" ->
Yojson.Safe.read_space p lb;
Yojson.Safe.read_comma p lb;
Yojson.Safe.read_space p lb;
let x = (
Atdgen_runtime.Oj_run.read_string
) p lb
in
Yojson.Safe.read_space p lb;
Yojson.Safe.read_rbr p lb;
(Some x : _ option)
| x ->
Atdgen_runtime.Oj_run.invalid_variant_tag p x
)
)
let _string_option_of_string s =
read__string_option (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__int_string_option_poly = (
write_poly Yojson.Safe.write_int write__string_option
)
let string_of__int_string_option_poly ?(len = 1024) x =
let ob = Buffer.create len in
write__int_string_option_poly ob x;
Buffer.contents ob
let read__int_string_option_poly = (
read_poly Atdgen_runtime.Oj_run.read_int read__string_option
)
let _int_string_option_poly_of_string s =
read__int_string_option_poly (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write_test2 : _ -> test2 -> _ = (
fun ob (x : test2) ->
Buffer.add_char ob '{';
let is_first = ref true in
if !is_first then
is_first := false
else
Buffer.add_char ob ',';
Buffer.add_string ob "\"test0\":";
(
write_poly_int2
)
ob x.test0;
if !is_first then
is_first := false
else
Buffer.add_char ob ',';
Buffer.add_string ob "\"test1\":";
(
write__int_string_option_poly
)
ob x.test1;
Buffer.add_char ob '}';
)
let string_of_test2 ?(len = 1024) x =
let ob = Buffer.create len in
write_test2 ob x;
Buffer.contents ob
let read_test2 = (
fun p lb ->
Yojson.Safe.read_space p lb;
Yojson.Safe.read_lcurl p lb;
let field_test0 = ref (None) in
let field_test1 = ref (None) in
try
Yojson.Safe.read_space p lb;
Yojson.Safe.read_object_end lb;
Yojson.Safe.read_space p lb;
let f =
fun s pos len ->
if pos < 0 || len < 0 || pos + len > String.length s then
invalid_arg (Printf.sprintf "out-of-bounds substring position or length: string = %S, requested position = %i, requested length = %i" s pos len);
if len = 5 && String.unsafe_get s pos = 't' && String.unsafe_get s (pos+1) = 'e' && String.unsafe_get s (pos+2) = 's' && String.unsafe_get s (pos+3) = 't' then (
match String.unsafe_get s (pos+4) with
| '0' -> (
0
)
| '1' -> (
1
)
| _ -> (
-1
)
)
else (
-1
)
in
let i = Yojson.Safe.map_ident p f lb in
Atdgen_runtime.Oj_run.read_until_field_value p lb;
(
match i with
| 0 ->
field_test0 := (
Some (
(
read_poly_int2
) p lb
)
);
| 1 ->
field_test1 := (
Some (
(
read__int_string_option_poly
) p lb
)
);
| _ -> (
Yojson.Safe.skip_json p lb
)
);
while true do
Yojson.Safe.read_space p lb;
Yojson.Safe.read_object_sep p lb;
Yojson.Safe.read_space p lb;
let f =
fun s pos len ->
if pos < 0 || len < 0 || pos + len > String.length s then
invalid_arg (Printf.sprintf "out-of-bounds substring position or length: string = %S, requested position = %i, requested length = %i" s pos len);
if len = 5 && String.unsafe_get s pos = 't' && String.unsafe_get s (pos+1) = 'e' && String.unsafe_get s (pos+2) = 's' && String.unsafe_get s (pos+3) = 't' then (
match String.unsafe_get s (pos+4) with
| '0' -> (
0
)
| '1' -> (
1
)
| _ -> (
-1
)
)
else (
-1
)
in
let i = Yojson.Safe.map_ident p f lb in
Atdgen_runtime.Oj_run.read_until_field_value p lb;
(
match i with
| 0 ->
field_test0 := (
Some (
(
read_poly_int2
) p lb
)
);
| 1 ->
field_test1 := (
Some (
(
read__int_string_option_poly
) p lb
)
);
| _ -> (
Yojson.Safe.skip_json p lb
)
);
done;
assert false;
with Yojson.End_of_object -> (
(
{
test0 = (match !field_test0 with Some x -> x | None -> Atdgen_runtime.Oj_run.missing_field p "test0");
test1 = (match !field_test1 with Some x -> x | None -> Atdgen_runtime.Oj_run.missing_field p "test1");
}
: test2)
)
)
let test2_of_string s =
read_test2 (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__int_string_poly = (
write_poly Yojson.Safe.write_int Yojson.Safe.write_string
)
let string_of__int_string_poly ?(len = 1024) x =
let ob = Buffer.create len in
write__int_string_poly ob x;
Buffer.contents ob
let read__int_string_poly = (
read_poly Atdgen_runtime.Oj_run.read_int Atdgen_runtime.Oj_run.read_string
)
let _int_string_poly_of_string s =
read__int_string_poly (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write_poly_int_string = (
write__int_string_poly
)
let string_of_poly_int_string ?(len = 1024) x =
let ob = Buffer.create len in
write_poly_int_string ob x;
Buffer.contents ob
let read_poly_int_string = (
read__int_string_poly
)
let poly_int_string_of_string s =
read_poly_int_string (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let create_test2
~test0
~test1
() : test2 =
{
test0 = test0;
test1 = test1;
}
| null | https://raw.githubusercontent.com/ahrefs/atd/1f2b3bcc54d14159a5e25e9b23b5c9bed163721c/atdgen/test/test2j.expected.ml | ocaml | Auto-generated from "test2.atd" | [@@@ocaml.warning "-27-32-33-35-39"]
open Test
open Test2
open Testj
let write_poly write__aa write__bb = (
Testj.write_poly write__aa write__bb
)
let string_of_poly write__aa write__bb ?(len = 1024) x =
let ob = Buffer.create len in
write_poly write__aa write__bb ob x;
Buffer.contents ob
let read_poly read__aa read__bb = (
Testj.read_poly read__aa read__bb
)
let poly_of_string read__aa read__bb s =
read_poly read__aa read__bb (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__int_int_poly = (
write_poly Yojson.Safe.write_int Yojson.Safe.write_int
)
let string_of__int_int_poly ?(len = 1024) x =
let ob = Buffer.create len in
write__int_int_poly ob x;
Buffer.contents ob
let read__int_int_poly = (
read_poly Atdgen_runtime.Oj_run.read_int Atdgen_runtime.Oj_run.read_int
)
let _int_int_poly_of_string s =
read__int_int_poly (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write_poly_int2 = (
write__int_int_poly
)
let string_of_poly_int2 ?(len = 1024) x =
let ob = Buffer.create len in
write_poly_int2 ob x;
Buffer.contents ob
let read_poly_int2 = (
read__int_int_poly
)
let poly_int2_of_string s =
read_poly_int2 (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__string_option = (
Atdgen_runtime.Oj_run.write_std_option (
Yojson.Safe.write_string
)
)
let string_of__string_option ?(len = 1024) x =
let ob = Buffer.create len in
write__string_option ob x;
Buffer.contents ob
let read__string_option = (
fun p lb ->
Yojson.Safe.read_space p lb;
match Yojson.Safe.start_any_variant p lb with
| `Edgy_bracket -> (
match Yojson.Safe.read_ident p lb with
| "None" ->
Yojson.Safe.read_space p lb;
Yojson.Safe.read_gt p lb;
(None : _ option)
| "Some" ->
Atdgen_runtime.Oj_run.read_until_field_value p lb;
let x = (
Atdgen_runtime.Oj_run.read_string
) p lb
in
Yojson.Safe.read_space p lb;
Yojson.Safe.read_gt p lb;
(Some x : _ option)
| x ->
Atdgen_runtime.Oj_run.invalid_variant_tag p x
)
| `Double_quote -> (
match Yojson.Safe.finish_string p lb with
| "None" ->
(None : _ option)
| x ->
Atdgen_runtime.Oj_run.invalid_variant_tag p x
)
| `Square_bracket -> (
match Atdgen_runtime.Oj_run.read_string p lb with
| "Some" ->
Yojson.Safe.read_space p lb;
Yojson.Safe.read_comma p lb;
Yojson.Safe.read_space p lb;
let x = (
Atdgen_runtime.Oj_run.read_string
) p lb
in
Yojson.Safe.read_space p lb;
Yojson.Safe.read_rbr p lb;
(Some x : _ option)
| x ->
Atdgen_runtime.Oj_run.invalid_variant_tag p x
)
)
let _string_option_of_string s =
read__string_option (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__int_string_option_poly = (
write_poly Yojson.Safe.write_int write__string_option
)
let string_of__int_string_option_poly ?(len = 1024) x =
let ob = Buffer.create len in
write__int_string_option_poly ob x;
Buffer.contents ob
let read__int_string_option_poly = (
read_poly Atdgen_runtime.Oj_run.read_int read__string_option
)
let _int_string_option_poly_of_string s =
read__int_string_option_poly (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write_test2 : _ -> test2 -> _ = (
fun ob (x : test2) ->
Buffer.add_char ob '{';
let is_first = ref true in
if !is_first then
is_first := false
else
Buffer.add_char ob ',';
Buffer.add_string ob "\"test0\":";
(
write_poly_int2
)
ob x.test0;
if !is_first then
is_first := false
else
Buffer.add_char ob ',';
Buffer.add_string ob "\"test1\":";
(
write__int_string_option_poly
)
ob x.test1;
Buffer.add_char ob '}';
)
let string_of_test2 ?(len = 1024) x =
let ob = Buffer.create len in
write_test2 ob x;
Buffer.contents ob
let read_test2 = (
fun p lb ->
Yojson.Safe.read_space p lb;
Yojson.Safe.read_lcurl p lb;
let field_test0 = ref (None) in
let field_test1 = ref (None) in
try
Yojson.Safe.read_space p lb;
Yojson.Safe.read_object_end lb;
Yojson.Safe.read_space p lb;
let f =
fun s pos len ->
if pos < 0 || len < 0 || pos + len > String.length s then
invalid_arg (Printf.sprintf "out-of-bounds substring position or length: string = %S, requested position = %i, requested length = %i" s pos len);
if len = 5 && String.unsafe_get s pos = 't' && String.unsafe_get s (pos+1) = 'e' && String.unsafe_get s (pos+2) = 's' && String.unsafe_get s (pos+3) = 't' then (
match String.unsafe_get s (pos+4) with
| '0' -> (
0
)
| '1' -> (
1
)
| _ -> (
-1
)
)
else (
-1
)
in
let i = Yojson.Safe.map_ident p f lb in
Atdgen_runtime.Oj_run.read_until_field_value p lb;
(
match i with
| 0 ->
field_test0 := (
Some (
(
read_poly_int2
) p lb
)
);
| 1 ->
field_test1 := (
Some (
(
read__int_string_option_poly
) p lb
)
);
| _ -> (
Yojson.Safe.skip_json p lb
)
);
while true do
Yojson.Safe.read_space p lb;
Yojson.Safe.read_object_sep p lb;
Yojson.Safe.read_space p lb;
let f =
fun s pos len ->
if pos < 0 || len < 0 || pos + len > String.length s then
invalid_arg (Printf.sprintf "out-of-bounds substring position or length: string = %S, requested position = %i, requested length = %i" s pos len);
if len = 5 && String.unsafe_get s pos = 't' && String.unsafe_get s (pos+1) = 'e' && String.unsafe_get s (pos+2) = 's' && String.unsafe_get s (pos+3) = 't' then (
match String.unsafe_get s (pos+4) with
| '0' -> (
0
)
| '1' -> (
1
)
| _ -> (
-1
)
)
else (
-1
)
in
let i = Yojson.Safe.map_ident p f lb in
Atdgen_runtime.Oj_run.read_until_field_value p lb;
(
match i with
| 0 ->
field_test0 := (
Some (
(
read_poly_int2
) p lb
)
);
| 1 ->
field_test1 := (
Some (
(
read__int_string_option_poly
) p lb
)
);
| _ -> (
Yojson.Safe.skip_json p lb
)
);
done;
assert false;
with Yojson.End_of_object -> (
(
{
test0 = (match !field_test0 with Some x -> x | None -> Atdgen_runtime.Oj_run.missing_field p "test0");
test1 = (match !field_test1 with Some x -> x | None -> Atdgen_runtime.Oj_run.missing_field p "test1");
}
: test2)
)
)
let test2_of_string s =
read_test2 (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write__int_string_poly = (
write_poly Yojson.Safe.write_int Yojson.Safe.write_string
)
let string_of__int_string_poly ?(len = 1024) x =
let ob = Buffer.create len in
write__int_string_poly ob x;
Buffer.contents ob
let read__int_string_poly = (
read_poly Atdgen_runtime.Oj_run.read_int Atdgen_runtime.Oj_run.read_string
)
let _int_string_poly_of_string s =
read__int_string_poly (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let write_poly_int_string = (
write__int_string_poly
)
let string_of_poly_int_string ?(len = 1024) x =
let ob = Buffer.create len in
write_poly_int_string ob x;
Buffer.contents ob
let read_poly_int_string = (
read__int_string_poly
)
let poly_int_string_of_string s =
read_poly_int_string (Yojson.Safe.init_lexer ()) (Lexing.from_string s)
let create_test2
~test0
~test1
() : test2 =
{
test0 = test0;
test1 = test1;
}
|
ea841cdc9ca4d2f84af5d586a8667477c9f173fa5003af98efabecb7c66c67c5 | okuoku/nausicaa | run.sps | Copyright ( c ) 2008
;;;
;;;This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Library General Public License as
published by the Free Software Foundation ; either version 2 of the
;;;License, or (at your option) any later version.
;;;
;;;This library is distributed in the hope that it will be useful, but
;;;WITHOUT ANY WARRANTY; without even the implied warranty of
;;;MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details .
;;;
You should have received a copy of the GNU Library General Public
License along with this library ; if not , write to the Free Software
Foundation , Inc. , 51 Franklin Street , Fifth Floor , Boston , MA
02110 - 1301 USA .
#!r6rs
;; Driver script for the R6RS conformance test suite: import every
;; per-chapter test library, run each suite in turn, then print the
;; aggregate pass/fail summary collected by (tests r6rs test).
(import (rnrs)
        (tests r6rs test)
        (tests r6rs base)
        (tests r6rs reader)
        (tests r6rs unicode)
        (tests r6rs bytevectors)
        (tests r6rs lists)
        (tests r6rs sorting)
        (tests r6rs control)
        (tests r6rs records syntactic)
        (tests r6rs records procedural)
        (tests r6rs exceptions)
        (tests r6rs conditions)
        (tests r6rs io ports)
        (tests r6rs io simple)
        (tests r6rs programs)
        (tests r6rs arithmetic fixnums)
        (tests r6rs arithmetic flonums)
        (tests r6rs arithmetic bitwise)
        (tests r6rs syntax-case)
        (tests r6rs hashtables)
        (tests r6rs enums)
        (tests r6rs eval)
        (tests r6rs mutable-pairs)
        (tests r6rs mutable-strings)
        (tests r6rs r5rs)
        (tests r6rs contrib))

;; Each run-*-tests procedure records its results in the shared harness
;; state; the order mirrors the import list above.
(run-base-tests)
(run-reader-tests)
(run-unicode-tests)
(run-bytevectors-tests)
(run-lists-tests)
(run-sorting-tests)
(run-control-tests)
(run-records-syntactic-tests)
(run-records-procedural-tests)
(run-exceptions-tests)
(run-conditions-tests)
(run-io-ports-tests)
(run-io-simple-tests)
(run-programs-tests)
(run-arithmetic-fixnums-tests)
(run-arithmetic-flonums-tests)
(run-arithmetic-bitwise-tests)
(run-syntax-case-tests)
(run-hashtables-tests)
(run-enums-tests)
(run-eval-tests)
(run-mutable-pairs-tests)
(run-mutable-strings-tests)
(run-r5rs-tests)
(run-contrib-tests)

;; Emit the combined report for everything run above.
(report-test-results)
| null | https://raw.githubusercontent.com/okuoku/nausicaa/50e7b4d4141ad4d81051588608677223fe9fb715/scheme/tests/r6rs/run.sps | scheme |
This library is free software; you can redistribute it and/or modify
either version 2 of the
License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
if not , write to the Free Software | Copyright ( c ) 2008
it under the terms of the GNU Library General Public License as
Library General Public License for more details .
You should have received a copy of the GNU Library General Public
Foundation , Inc. , 51 Franklin Street , Fifth Floor , Boston , MA
02110 - 1301 USA .
#!r6rs
(import (rnrs)
(tests r6rs test)
(tests r6rs base)
(tests r6rs reader)
(tests r6rs unicode)
(tests r6rs bytevectors)
(tests r6rs lists)
(tests r6rs sorting)
(tests r6rs control)
(tests r6rs records syntactic)
(tests r6rs records procedural)
(tests r6rs exceptions)
(tests r6rs conditions)
(tests r6rs io ports)
(tests r6rs io simple)
(tests r6rs programs)
(tests r6rs arithmetic fixnums)
(tests r6rs arithmetic flonums)
(tests r6rs arithmetic bitwise)
(tests r6rs syntax-case)
(tests r6rs hashtables)
(tests r6rs enums)
(tests r6rs eval)
(tests r6rs mutable-pairs)
(tests r6rs mutable-strings)
(tests r6rs r5rs)
(tests r6rs contrib))
(run-base-tests)
(run-reader-tests)
(run-unicode-tests)
(run-bytevectors-tests)
(run-lists-tests)
(run-sorting-tests)
(run-control-tests)
(run-records-syntactic-tests)
(run-records-procedural-tests)
(run-exceptions-tests)
(run-conditions-tests)
(run-io-ports-tests)
(run-io-simple-tests)
(run-programs-tests)
(run-arithmetic-fixnums-tests)
(run-arithmetic-flonums-tests)
(run-arithmetic-bitwise-tests)
(run-syntax-case-tests)
(run-hashtables-tests)
(run-enums-tests)
(run-eval-tests)
(run-mutable-pairs-tests)
(run-mutable-strings-tests)
(run-r5rs-tests)
(run-contrib-tests)
(report-test-results)
|
dee9398a119a7c774235db127a852f6accd0b9edba241a591d05ac1b897f4b18 | JoelSanchez/ventas | ui.cljs | (ns ventas.utils.ui)
(defn with-handler
  "Wraps the event callback `cb` so the DOM event is cancelled
  (preventDefault + stopPropagation) before `cb` is invoked with it."
  [cb]
  (fn [event]
    (.preventDefault event)
    (.stopPropagation event)
    (cb event)))
| null | https://raw.githubusercontent.com/JoelSanchez/ventas/dc8fc8ff9f63dfc8558ecdaacfc4983903b8e9a1/src/cljs/ventas/utils/ui.cljs | clojure | (ns ventas.utils.ui)
(defn with-handler [cb]
(fn [e]
(doto e
.preventDefault
.stopPropagation)
(cb e)))
|
|
d10de21ed948b168ffc42acfeb0e25d353e16dbfa4f9dd84469825ca734a4a68 | benzap/fif | def.cljc | (ns fif.def
(:require
[clojure.string :as str]
[fif.stack-machine :as stack]
[fif.stack-machine.evaluators :as evaluators]
[fif.stack-machine.error-handling :as error-handling]
[fif.stack-machine.exceptions :as exceptions]
[fif.stack-machine.verification :as verification]
[fif.stack-machine.words :as words]
[fif.stack-machine.variable :as variable]))
(defn wrap-code-eval
  "Returns a stack-machine function that evaluates the given fif forms
  and then resets the machine's step counter to zero."
  [forms]
  (fn [sm]
    (stack/set-step-num (evaluators/eval-fn sm forms) 0)))
(defn wrap-function-with-arity
  "Adapts the plain clojure function `f`, which takes `num-args`
  positional arguments, into a word usable by a stack machine.

  The generated word pops `num-args` values from the main stack, calls
  `f` with them (oldest value first) and pushes the single result back
  onto the stack.

  Notes:

  This wrapper always pushes a result.  Use
  `fif.def/wrap-procedure-with-arity` when no result should be pushed."
  [num-args f]
  (fn [sm]
    (if-not (verification/stack-satisfies-arity? sm num-args)
      ;; Not enough operands on the stack to apply this word.
      (exceptions/raise-incorrect-arity-error sm num-args)
      (let [current   (stack/get-stack sm)
            operands  (reverse (take num-args current))
            result    (apply f operands)
            remainder (drop num-args current)
            new-stack (apply list (cons result remainder))]
        (-> sm
            (stack/set-stack new-stack)
            stack/dequeue-code)))))
(defn wrap-procedure-with-arity
  "Wraps a clojure function `f` which accepts `num-args`, and returns
  the function wrapped to be used in a stack machine.

  The returned function pops `num-args` values from the main stack and
  applies `f` to them (oldest value first) for its side effects only.

  Notes:

  This wrapper never pushes a result onto the stack. If you wish to
  return a result, use `fif.def/wrap-function-with-arity` instead."
  [num-args f]
  (fn [sm]
    (cond
      ;; Check to see if the main stack has enough arguments to
      ;; satisfy the word function.
      (not (verification/stack-satisfies-arity? sm num-args))
      (exceptions/raise-incorrect-arity-error sm num-args)

      :else
      (let [args (take num-args (stack/get-stack sm))
            _ (apply f (reverse args))
            ;; BUG FIX: the previous `(into '() new-stack)` reversed the
            ;; order of the remaining stack items; build the list with
            ;; `apply list` to preserve order, exactly as
            ;; wrap-function-with-arity does.
            new-stack (->> sm stack/get-stack (drop num-args) (apply list))]
        (-> sm
            (stack/set-stack new-stack)
            stack/dequeue-code)))))
(defmacro defcode-eval
  "Allows you to define functions that contain fif code, which can then
  be passed through a fif stack machine to be evaluated.

  Example:

  (defcode-eval import-add2-library
    fn add2
      + 2
    endfn)

  (def custom-stack-machine
    (-> fif.core/*default-stack*
        import-add2-library))

  (fif.core/with-stack custom-stack-machine
    (fif.core/reval 2 add2))
  ;; => '(4)   (example reconstructed from a garbled docstring — verify)"
  [name & body]
  ;; `body` is captured unevaluated: the raw fif forms are quoted and
  ;; handed to wrap-code-eval, which replays them on a stack machine.
  `(def ~name (wrap-code-eval (quote ~body))))
;;
;; Define Stack Functions
;;
;; defstack-func-N: define `name` as a stack word that pops N operands,
;; applies `f`, and pushes the result back onto the stack.
(defmacro defstack-func-0 [name f]
  `(def ~name (wrap-function-with-arity 0 ~f)))

(defmacro defstack-func-1 [name f]
  `(def ~name (wrap-function-with-arity 1 ~f)))

(defmacro defstack-func-2 [name f]
  `(def ~name (wrap-function-with-arity 2 ~f)))

(defmacro defstack-func-3 [name f]
  `(def ~name (wrap-function-with-arity 3 ~f)))
;;
;; Define Stack Procedures
;;
;; defstack-proc-N: define `name` as a stack word that pops N operands and
;; applies `f` for its side effects only — nothing is pushed back.
(defmacro defstack-proc-0 [name f]
  `(def ~name (wrap-procedure-with-arity 0 ~f)))

(defmacro defstack-proc-1 [name f]
  `(def ~name (wrap-procedure-with-arity 1 ~f)))

(defmacro defstack-proc-2 [name f]
  `(def ~name (wrap-procedure-with-arity 2 ~f)))

(defmacro defstack-proc-3 [name f]
  `(def ~name (wrap-procedure-with-arity 3 ~f)))
;;
;; Define Global Variables
;;
(def wrap-variable variable/wrap-global-variable)
(defn set-word-variable
  "Creates a new word variable in `sm` with the symbol name `wname` and
  with the value `value`. Optionally, you can include a docstring, and
  a group key.

  Notes:

  - `value` is automatically wrapped for you. This is not the case
    with `set-word-definition`."
  [sm wname value & {:keys [doc group]}]
  ;; doc/group default to nil when the caller supplies no options.
  (words/set-global-word-defn
   sm
   wname (wrap-variable value)
   :doc doc :group group
   :stdlib? false    ;; user-defined word, not part of the stdlib
   :variable? true)) ;; flags the word as a variable binding
;;
;; Define Global Words
;;
(defn set-word-function
  "Creates a new word function in `sm` with the symbol name `wname` and
  with the word function defined by `wfunc`.

  Notes:

  - `wfunc` is a stack-machine function. Normal clojure functions can
    be turned into stack-machine functions via
    `wrap-function-with-arity` and `wrap-procedure-with-arity`."
  [sm wname wfunc & {:keys [doc group]}]
  ;; doc/group default to nil when the caller supplies no options.
  (words/set-global-word-defn
   sm
   wname wfunc
   :doc doc :group group
   :stdlib? false     ;; user-defined word, not part of the stdlib
   :variable? false)) ;; an executable word, not a variable binding
| null | https://raw.githubusercontent.com/benzap/fif/972adab8b86c016b04babea49d52198585172fe3/src/fif/def.cljc | clojure | Check to see if the main stack has enough arguments to
satisfy the word operation.
Check to see if the main stack has enough arguments to
satisfy the word function.
Define Stack Functions
Define Stack Procedures
Define Global Variables
Define Global Words
| (ns fif.def
(:require
[clojure.string :as str]
[fif.stack-machine :as stack]
[fif.stack-machine.evaluators :as evaluators]
[fif.stack-machine.error-handling :as error-handling]
[fif.stack-machine.exceptions :as exceptions]
[fif.stack-machine.verification :as verification]
[fif.stack-machine.words :as words]
[fif.stack-machine.variable :as variable]))
(defn wrap-code-eval
[args]
(fn [sm]
(-> sm
(evaluators/eval-fn args)
(stack/set-step-num 0))))
(defn wrap-function-with-arity
"Wraps a clojure function `f` which accepts `num-args`, and returns
the function wrapped to be used in a stack machine.
The returned function accepts `num-args` values on the stack, drops
`num-args` after processing the wrapped function, and pushes the
result of `(apply f args)` back onto the stack.
Notes:
This wrapper always returns a result on the stack. If you do not
wish to return a result, use `fif.def/wrap-procedure-with-arity`
instead.
Examples:
(defn add2 [x] (+ x 2))
(def my-stack-machine (-> (fif.stack/new-stack-machine)
(fif.stack/set-word 'add2 (fif.def/wrap-function-with-arity 1 add2))))
(fif.core/with-stack my-stack-machine
= > ' ( 3 ) "
[num-args f]
(fn [sm]
(cond
(not (verification/stack-satisfies-arity? sm num-args))
(exceptions/raise-incorrect-arity-error sm num-args)
:else
(let [args (take num-args (stack/get-stack sm))
result (apply f (reverse args))
new-stack (as-> sm $
(stack/get-stack $)
(drop num-args $)
(concat [result] $)
(apply list $))]
(-> sm
(stack/set-stack new-stack)
stack/dequeue-code)))))
(defn wrap-procedure-with-arity
"Wraps a clojure function `f` which accepts `num-args`, and returns
the function wrapped to be used in a stack machine.
The returned function accepts `num-args` values on the stack, drops
`num-args` after processing the wrapped function.
Notes:
This wrapper never returns a result on the stack. If you wish to
return a result, use `fif.def/wrap-function-with-arity` instead.
Examples:
(def val (atom nil))
(defn set-val! [x] (reset! val x))
(def my-stack-machine (-> (fif.stack/new-stack-machine)
(fif.stack/set-word 'set-val!
(fif.def/wrap-procedure-with-arity 1 set-val!))))
(fif.core/with-stack my-stack-machine
(fif.core/reval 1 set-val!)
= > 1
"
[num-args f]
(fn [sm]
(cond
(not (verification/stack-satisfies-arity? sm num-args))
(exceptions/raise-incorrect-arity-error sm num-args)
:else
(let [args (take num-args (stack/get-stack sm))
_ (apply f (reverse args))
new-stack (->> sm stack/get-stack (drop num-args))]
(-> sm
(stack/set-stack (into '() new-stack))
stack/dequeue-code)))))
(defmacro defcode-eval
"Allows you to define functions that contain fif code, which can then
be passed through a fif stack machine to be evaluated.
Example:
(defcode-eval import-add2-library
fn add2
+ 2
endfn)
(def custom-stack-machine
(-> fif.core/*default-stack*
import-add2-library))
(fif.core/with-stack custom-stack-machine
= > ' ( 4 ) "
[name & body]
`(def ~name (wrap-code-eval (quote ~body))))
(defmacro defstack-func-0 [name f]
`(def ~name (wrap-function-with-arity 0 ~f)))
(defmacro defstack-func-1 [name f]
`(def ~name (wrap-function-with-arity 1 ~f)))
(defmacro defstack-func-2 [name f]
`(def ~name (wrap-function-with-arity 2 ~f)))
(defmacro defstack-func-3 [name f]
`(def ~name (wrap-function-with-arity 3 ~f)))
(defmacro defstack-proc-0 [name f]
`(def ~name (wrap-procedure-with-arity 0 ~f)))
(defmacro defstack-proc-1 [name f]
`(def ~name (wrap-procedure-with-arity 1 ~f)))
(defmacro defstack-proc-2 [name f]
`(def ~name (wrap-procedure-with-arity 2 ~f)))
(defmacro defstack-proc-3 [name f]
`(def ~name (wrap-procedure-with-arity 3 ~f)))
(def wrap-variable variable/wrap-global-variable)
(defn set-word-variable
"Creates a new word variable in `sm` with the symbol name `wname` and
with the value `value`. Optionally, you can include a docstring, and
a group key.
Notes:
- `value` is automatically wrapped for you. This is not the case
with `set-word-definition`."
[sm wname value & {:keys [doc group]}]
(words/set-global-word-defn
sm
wname (wrap-variable value)
:doc doc :group group
:stdlib? false
:variable? true))
(defn set-word-function
"Creates a new word function in `sm` with the symbol name `wname` and
with the word function defined by `wfunc`.
Notes:
- `wfunc` is a stack-machine function. Normal clojure functions can
be turned into stack-machine functions via
`wrap-function-with-arity` and `wrap-procedure-with-arity`."
[sm wname wfunc & {:keys [doc group]}]
(words/set-global-word-defn
sm
wname wfunc
:doc doc :group group
:stdlib? false
:variable? false))
|
160acf3af2193a6b90959b325ea3f7a2c63a6a6901acb1138f7abd0a80971530 | senorcarbone/erlang-nnar-kvs | kvs.erl | -module(kvs).
-behavior(gen_server).
-export([start/1, start/2, stop/1, put/3, get/2, configure/3, callbacks/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, addItemToStoreList/3]).
-record(state, {callee, beb, rs, ts=0, store=orddict:new(), writeSet=orddict:new(),
readSet=orddict:new(), writeQuorum=0, readQuorum=0, pendingRead=orddict:new(),
pendingWrite=orddict:new(),pendingGetReply=orddict:new(), pendingPutReply=orddict:new()}).
%% ---- Client API -------------------------------------------------------
%% start/1,2: spawn a linked kvs server.  Callee is the process that later
%% receives the asynchronous kvsputreply/kvsgetreply messages; Interv is
%% forwarded to the replicated store (rs), defaulting to 8000 (see init/1).
start(Callee) ->
    gen_server:start_link(?MODULE, [Callee], []).
start(Callee, Interv) ->
    gen_server:start_link(?MODULE, [Callee, Interv], []).

%% Asynchronously stop the server (its beb/rs helpers are stopped too).
stop(Pid) ->
    gen_server:cast(Pid, stop).

%% Quorum write: returns ok immediately; completion is delivered later to
%% the callee as {kvsputreply, Key}.
put(Pid, Key, Value) ->
    gen_server:call(Pid, {kvs_put, Key, Value}).

%% Quorum read: returns ok immediately; the value is delivered later to
%% the callee as {kvsgetreply, Key, Val}.
get(Pid, Key) ->
    gen_server:call(Pid, {kvs_get, Key}).

%% Returns {Self, BebPid, RsCallbacks}, used by configure/3 to cross-wire
%% a group of instances.
callbacks(Pid) ->
    gen_server:call(Pid, callbacks).

%% Installs the peer instance list and the {WriteQuorum, ReadQuorum} pair.
configure(Pid, Instances, Quorums) ->
    gen_server:call(Pid, {reconfigure, Instances, Quorums}).
%%GEN_SERVER
%% gen_server init: the single-argument form defaults the rs interval to
%% 8000 (presumably milliseconds — TODO confirm against rs:start/2).
init([Callee]) ->
    init([Callee, 8000]);
init([Callee, Interv]) ->
    %% Start the best-effort-broadcast and replicated-store helpers owned
    %% by this server; both deliver their callbacks to self().
    {ok, BeB} = beb:start(self()),
    {ok, ReplStore} = rs:start(self(), Interv),
    {ok, #state{beb = BeB, rs=ReplStore, callee = Callee}}.
%% callbacks: expose this server's pid, its beb pid and the rs callbacks so
%% a coordinator can cross-wire several instances via reconfigure.
handle_call(callbacks, _From, C=#state{}) ->
    {reply, {self(), C#state.beb, rs:callbacks(C#state.rs)}, C};
%% reconfigure: split each instance triple into its beb pid (2nd slot) and
%% rs-callback tuple (3rd slot), push the resulting groups down to the
%% helper processes, and remember the two quorum sizes.
handle_call({reconfigure, Instances, {WriteQuorum, ReadQuorum}}, _From, C=#state{}) ->
    BebConfig = lists:foldl(fun({_,BepInst,_}, Acc)-> [BepInst|Acc] end,[], Instances),
    RsConfig = lists:foldl(fun({_,_,NrInst}, Acc)-> [NrInst|Acc]end, [], Instances),
    beb:configure(C#state.beb, BebConfig),
    %% Each rs callback tuple carries its own beb pid in the second slot.
    RsBebConfig = lists:foldl(fun({_,BEB},Acc)-> [BEB|Acc] end, [], RsConfig),
    rs:configure(C#state.rs, RsBebConfig),
    {reply, ok, C#state{writeQuorum=WriteQuorum, readQuorum=ReadQuorum}};
%% kvs_put: tag the write with the local timestamp, broadcast it, open an
%% empty ack set for Key, and bump the timestamp.  The gen_server call is
%% answered immediately; real completion arrives via put_reply quorum.
handle_call({kvs_put, Key, Val}, _From, C=#state{}) ->
    beb:broadcast(C#state.beb, {put, {self(), C#state.ts, Key, Val}}),
    {reply, ok, C#state{writeSet=orddict:store(Key,[],C#state.writeSet), ts=C#state.ts+1, pendingPutReply=orddict:store(Key, true, C#state.pendingPutReply)}};
%% kvs_get: broadcast the read and open an empty reply set for Key.
handle_call({kvs_get, Key}, _From, C=#state{}) ->
    beb:broadcast(C#state.beb, {get, {self(), Key}}),
    {reply, ok, C#state{readSet=orddict:store(Key,[],C#state.readSet), pendingGetReply=orddict:store(Key, true, C#state.pendingGetReply)}}.
%% put_reply: one replica acknowledged our write for Key.
handle_cast({put_reply, From, Key}, C=#state{}) ->
    % io:format("~n ~p got putreply from ~p for key ~p",[self(), From, Key ]),
    case orddict:is_key(Key, C#state.pendingPutReply) of
        true ->
            %% Record the acknowledging replica (deduplicated by
            %% addItemToStoreList) and check the write quorum.
            UpdWriteSet = addItemToStoreList(From, Key, C#state.writeSet),
            case length(orddict:fetch(Key,UpdWriteSet)) >= C#state.writeQuorum of
                true ->
                    %% Quorum reached: notify the callee and close the ack
                    %% set for this key.
                    C#state.callee ! {kvsputreply, Key},
                    {noreply, C#state{writeSet = orddict:erase(Key, C#state.writeSet), pendingPutReply=orddict:store(Key, false, C#state.pendingPutReply)}};
                false ->
                    {noreply, C#state{writeSet=UpdWriteSet}}
            end;
        false ->
            {noreply, C}
    end;
%% get_reply: one replica returned its {NodeId,ValueTS,Key,Value} for a
%% pending read.
handle_cast({get_reply, From, {NodeId,ValueTS,Key,Value}}, C=#state{}) ->
    case orddict:is_key(Key,C#state.pendingGetReply) of
        true ->
            UpdReadSet = addItemToStoreList({From, {NodeId,ValueTS,Key,Value}}, Key, C#state.readSet),
            case length(orddict:fetch(Key,UpdReadSet)) >= C#state.readQuorum of
                true ->
                    %% NOTE(review): this takes the max over the PRE-update
                    %% readSet (it does not include the reply received just
                    %% now), and lists:max/1 orders the {From, ...} tuples
                    %% primarily by sender pid rather than by
                    %% {ValueTS, NodeId} — verify both against the intended
                    %% quorum-read semantics.
                    {_,{_,_,_,Val}} = lists:max(orddict:fetch(Key,C#state.readSet)),
                    C#state.callee ! {kvsgetreply, Key, Val},
                    {noreply, C#state{readSet=orddict:erase(Key, C#state.readSet), pendingGetReply=orddict:store(Key, false, C#state.pendingGetReply)}};
                false ->
                    {noreply, C#state{readSet=UpdReadSet}}
            end;
        false ->
            {noreply,C}
    end;
%% stop: tear down the helper processes before terminating normally.
handle_cast(stop, C=#state{}) ->
    beb:stop(C#state.beb),
    rs:stop(C#state.rs),
    {stop, normal, C}.
%% rsValue: the replicated store resolved Hash to Data = {NodeId, ValueTS,
%% Key, Value}.  Update the local index, answer pending readers, and ack
%% pending writers for that key.
handle_info({rsValue, Hash, Data}, C=#state{}) ->
    %% io:format("~n ~p got rsValue for hash ~p and Data: ~p",[self(), Hash, Data]),
    {NodeId, ValueTS, Key, _Value} = Data,
    UpdC =
        case orddict:is_key(Key,C#state.store) of
            true ->
                {StoreTs, _} = orddict:fetch(Key, C#state.store),
                NewStore =
                    %% Keep the hash of the newer value; timestamp ties are
                    %% broken deterministically by node id via the
                    %% {Timestamp, Node} tuple ordering.
                    case {StoreTs, self()} < {ValueTS, NodeId} of
                        true ->
                            orddict:store(Key, {ValueTS, Hash}, C#state.store);
                        false->
                            orddict:store(Key, {StoreTs, Hash}, C#state.store)
                    end,
                C#state{ts=lists:max([ValueTS,C#state.ts]), store=NewStore};
            false ->
                C#state{ts=lists:max([ValueTS,C#state.ts]), store=orddict:store(Key,{ValueTS, Hash},C#state.store)}
        end,
    %% Reply to every reader parked on this key.
    case orddict:is_key(Key, UpdC#state.pendingRead) of
        true ->
            lists:foreach(fun(Node)-> gen_server:cast(Node,{get_reply, self(), Data}) end,
                          orddict:fetch(Key,UpdC#state.pendingRead));
        false -> ok
    end,
    %% Ack every writer parked on this key, then prune exactly those
    %% entries from the pending-write set.  NOTE(review): pendingRead is
    %% reset for ALL keys below (fresh orddict) while pendingWrite is
    %% pruned per key — confirm the asymmetry is intentional.
    ToRemove = lists:foldl(
                 fun({LNode,LNodeID,LValueTS}, Acc)->
                     gen_server:cast(LNode, {put_reply, self(), Key}),
                     [{LNode,LNodeID,LValueTS}|Acc]
                 end,[], orddict:fetch(Key,UpdC#state.pendingWrite)),
    PendingW = orddict:fetch(Key, UpdC#state.pendingWrite),
    UpdPendingWriteList = lists:foldl(fun(P, Acc)-> lists:delete(P,Acc) end, PendingW, ToRemove),
    UpdPendWrite = orddict:store(Key, UpdPendingWriteList ,C#state.pendingWrite),
    {noreply, UpdC#state{pendingRead=orddict:new(), pendingWrite=UpdPendWrite}};
%% bebdeliver/put: hand the value to the replicated store and remember the
%% writer so it can be acked once rs confirms via rsValue.
handle_info({bebdeliver, {put, Data}}, C=#state{}) ->
    % io:format("~n ~p got put for : ~p",[self(), Data]),
    {NodeID, ValueTS, Key, _Value} = Data,
    %% Debug trace left live in the original — presumably a leftover;
    %% verify before removing.
    io:format("~n ~p updating pendingWrite to ~p", [self(), addItemToStoreList({NodeID, NodeID, ValueTS}, Key, C#state.pendingWrite)]),
    rs:store(C#state.rs, Data),
    {noreply, C#state{pendingWrite=addItemToStoreList({NodeID, NodeID, ValueTS},Key,C#state.pendingWrite)}};
%% bebdeliver/get: if we hold a hash for Key, ask rs to resolve it and park
%% the reader; otherwise answer immediately with a nil value and a
%% timestamp one below our current clock.
handle_info({bebdeliver, {get, {Node, Key}}}, C=#state{}) ->
    case orddict:is_key(Key, C#state.store) of
        true ->
            {_,Hash} = orddict:fetch(Key,C#state.store),
            rs:retrieve(C#state.rs, Hash),
            {noreply, C#state{pendingRead=addItemToStoreList(Node,Key,C#state.pendingRead)}};
        false ->
            gen_server:cast(Node,{get_reply, self(), {self(), C#state.ts-1, Key, nil}}),
            {noreply, C}
    end.
%% Nothing to clean up here beyond what the stop cast already tears down.
terminate(_Reason, _State) ->
    ok.
%%private functions
%% Append Item to the list stored under Key, keeping the list sorted and
%% duplicate-free.  The original round-tripped through gb_sets
%% (from_list/to_list), which is exactly lists:usort/1; orddict:update/4
%% stores the initial [Item] when the key is absent.
addItemToStoreList(Item, Key, Store) ->
    orddict:update(Key,
                   fun(Items) -> lists:usort([Item | Items]) end,
                   [Item],
                   Store).
| null | https://raw.githubusercontent.com/senorcarbone/erlang-nnar-kvs/defb7a529b5fb3c248be7f620fa7fbbd312723b5/src/kvs.erl | erlang | GEN_SERVER
io:format("~n ~p got putreply from ~p for key ~p",[self(), From, Key ]),
io:format("~n ~p got put for : ~p",[self(), Data]),
private functions | -module(kvs).
-behavior(gen_server).
-export([start/1, start/2, stop/1, put/3, get/2, configure/3, callbacks/1]).
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, addItemToStoreList/3]).
-record(state, {callee, beb, rs, ts=0, store=orddict:new(), writeSet=orddict:new(),
readSet=orddict:new(), writeQuorum=0, readQuorum=0, pendingRead=orddict:new(),
pendingWrite=orddict:new(),pendingGetReply=orddict:new(), pendingPutReply=orddict:new()}).
start(Callee) ->
gen_server:start_link(?MODULE, [Callee], []).
start(Callee, Interv) ->
gen_server:start_link(?MODULE, [Callee, Interv], []).
stop(Pid) ->
gen_server:cast(Pid, stop).
put(Pid, Key, Value) ->
gen_server:call(Pid, {kvs_put, Key, Value}).
get(Pid, Key) ->
gen_server:call(Pid, {kvs_get, Key}).
callbacks(Pid) ->
gen_server:call(Pid, callbacks).
configure(Pid, Instances, Quorums) ->
gen_server:call(Pid, {reconfigure, Instances, Quorums}).
init([Callee]) ->
init([Callee, 8000]);
init([Callee, Interv]) ->
{ok, BeB} = beb:start(self()),
{ok, ReplStore} = rs:start(self(), Interv),
{ok, #state{beb = BeB, rs=ReplStore, callee = Callee}}.
handle_call(callbacks, _From, C=#state{}) ->
{reply, {self(), C#state.beb, rs:callbacks(C#state.rs)}, C};
handle_call({reconfigure, Instances, {WriteQuorum, ReadQuorum}}, _From, C=#state{}) ->
BebConfig = lists:foldl(fun({_,BepInst,_}, Acc)-> [BepInst|Acc] end,[], Instances),
RsConfig = lists:foldl(fun({_,_,NrInst}, Acc)-> [NrInst|Acc]end, [], Instances),
beb:configure(C#state.beb, BebConfig),
RsBebConfig = lists:foldl(fun({_,BEB},Acc)-> [BEB|Acc] end, [], RsConfig),
rs:configure(C#state.rs, RsBebConfig),
{reply, ok, C#state{writeQuorum=WriteQuorum, readQuorum=ReadQuorum}};
handle_call({kvs_put, Key, Val}, _From, C=#state{}) ->
beb:broadcast(C#state.beb, {put, {self(), C#state.ts, Key, Val}}),
{reply, ok, C#state{writeSet=orddict:store(Key,[],C#state.writeSet), ts=C#state.ts+1, pendingPutReply=orddict:store(Key, true, C#state.pendingPutReply)}};
handle_call({kvs_get, Key}, _From, C=#state{}) ->
beb:broadcast(C#state.beb, {get, {self(), Key}}),
{reply, ok, C#state{readSet=orddict:store(Key,[],C#state.readSet), pendingGetReply=orddict:store(Key, true, C#state.pendingGetReply)}}.
handle_cast({put_reply, From, Key}, C=#state{}) ->
case orddict:is_key(Key, C#state.pendingPutReply) of
true ->
UpdWriteSet = addItemToStoreList(From, Key, C#state.writeSet),
case length(orddict:fetch(Key,UpdWriteSet)) >= C#state.writeQuorum of
true ->
C#state.callee ! {kvsputreply, Key},
{noreply, C#state{writeSet = orddict:erase(Key, C#state.writeSet), pendingPutReply=orddict:store(Key, false, C#state.pendingPutReply)}};
false ->
{noreply, C#state{writeSet=UpdWriteSet}}
end;
false ->
{noreply, C}
end;
handle_cast({get_reply, From, {NodeId,ValueTS,Key,Value}}, C=#state{}) ->
case orddict:is_key(Key,C#state.pendingGetReply) of
true ->
UpdReadSet = addItemToStoreList({From, {NodeId,ValueTS,Key,Value}}, Key, C#state.readSet),
case length(orddict:fetch(Key,UpdReadSet)) >= C#state.readQuorum of
true ->
{_,{_,_,_,Val}} = lists:max(orddict:fetch(Key,C#state.readSet)),
C#state.callee ! {kvsgetreply, Key, Val},
{noreply, C#state{readSet=orddict:erase(Key, C#state.readSet), pendingGetReply=orddict:store(Key, false, C#state.pendingGetReply)}};
false ->
{noreply, C#state{readSet=UpdReadSet}}
end;
false ->
{noreply,C}
end;
handle_cast(stop, C=#state{}) ->
beb:stop(C#state.beb),
rs:stop(C#state.rs),
{stop, normal, C}.
handle_info({rsValue, Hash, Data}, C=#state{}) ->
io : format("~n ~p got rsValue for hash ~p and Data : ~p " , [ self ( ) , , Data ] ) ,
{NodeId, ValueTS, Key, _Value} = Data,
UpdC =
case orddict:is_key(Key,C#state.store) of
true ->
{StoreTs, _} = orddict:fetch(Key, C#state.store),
NewStore =
case {StoreTs, self()} < {ValueTS, NodeId} of
true ->
orddict:store(Key, {ValueTS, Hash}, C#state.store);
false->
orddict:store(Key, {StoreTs, Hash}, C#state.store)
end,
C#state{ts=lists:max([ValueTS,C#state.ts]), store=NewStore};
false ->
C#state{ts=lists:max([ValueTS,C#state.ts]), store=orddict:store(Key,{ValueTS, Hash},C#state.store)}
end,
case orddict:is_key(Key, UpdC#state.pendingRead) of
true ->
lists:foreach(fun(Node)-> gen_server:cast(Node,{get_reply, self(), Data}) end,
orddict:fetch(Key,UpdC#state.pendingRead));
false -> ok
end,
ToRemove = lists:foldl(
fun({LNode,LNodeID,LValueTS}, Acc)->
gen_server:cast(LNode, {put_reply, self(), Key}),
[{LNode,LNodeID,LValueTS}|Acc]
end,[], orddict:fetch(Key,UpdC#state.pendingWrite)),
PendingW = orddict:fetch(Key, UpdC#state.pendingWrite),
UpdPendingWriteList = lists:foldl(fun(P, Acc)-> lists:delete(P,Acc) end, PendingW, ToRemove),
UpdPendWrite = orddict:store(Key, UpdPendingWriteList ,C#state.pendingWrite),
{noreply, UpdC#state{pendingRead=orddict:new(), pendingWrite=UpdPendWrite}};
handle_info({bebdeliver, {put, Data}}, C=#state{}) ->
{NodeID, ValueTS, Key, _Value} = Data,
io : format("~n ~p updating pendingWrite to ~p",[self ( ) , addItemToStoreList({NodeID , NodeID , ValueTS},Key , C#state.pendingWrite ) ] ) ,
rs:store(C#state.rs, Data),
{noreply, C#state{pendingWrite=addItemToStoreList({NodeID, NodeID, ValueTS},Key,C#state.pendingWrite)}};
handle_info({bebdeliver, {get, {Node, Key}}}, C=#state{}) ->
case orddict:is_key(Key, C#state.store) of
true ->
{_,Hash} = orddict:fetch(Key,C#state.store),
rs:retrieve(C#state.rs, Hash),
{noreply, C#state{pendingRead=addItemToStoreList(Node,Key,C#state.pendingRead)}};
false ->
gen_server:cast(Node,{get_reply, self(), {self(), C#state.ts-1, Key, nil}}),
{noreply, C}
end.
terminate(_Reason, _State) ->
ok.
addItemToStoreList(Item, Key , Store) ->
case(orddict:is_key(Key,Store)) of
true ->
FilteredSet = gb_sets:from_list([Item|orddict:fetch(Key,Store)]),
orddict:store(Key, gb_sets:to_list(FilteredSet),Store);
false ->
orddict:store(Key, [Item], Store)
end.
|
9db2fda709664bbf6ec715dfd825e1c1836fdbba7c9259d88ec44793ff167b52 | chaitanyagupta/chronicity | repeater-fortnight.lisp | -*- Mode : LISP ; Syntax : COMMON - LISP ; Package : CL - USER ; Base : 10 -*-
;;; repeater-fortnight.lisp
;;; See the LICENSE file for licensing information.
(cl:in-package #:chronicity)
(defclass repeater-fortnight (repeater)
((current-fortnight-start :initform nil)))
(defmethod r-next ((repeater repeater-fortnight) pointer)
(with-slots (current-fortnight-start now)
repeater
(let ((sunday (start-of-week now)))
(if (not current-fortnight-start)
(case pointer
(:future (setf current-fortnight-start (datetime-incr sunday :week)))
(:past (setf current-fortnight-start (datetime-decr sunday :week 2))))
(let ((amount (* 2 (if (eql pointer :future) 1 -1))))
(datetime-incf current-fortnight-start :week amount)))
(make-span current-fortnight-start
(datetime-incr current-fortnight-start :week 2)))))
(defmethod r-this ((repeater repeater-fortnight) pointer)
(with-slots (now)
repeater
(let ((sunday (start-of-week now)))
(case pointer
(:future (make-span now (datetime-incr sunday :week 2)))
(:past (make-span sunday now))
(:none (make-span sunday (datetime-incr sunday :week 2) nil now))))))
(defmethod r-offset ((repeater repeater-fortnight) span amount pointer)
(span+ span (* 2 amount (if (eql pointer :future) 1 -1)) :week))
(defmethod r-width ((repeater repeater-fortnight))
+fortnight-seconds+)
| null | https://raw.githubusercontent.com/chaitanyagupta/chronicity/5841d1548cad0ca6917d8e68933124a5af68f5ec/src/repeaters/repeater-fortnight.lisp | lisp | Syntax : COMMON - LISP ; Package : CL - USER ; Base : 10 -*-
repeater-fortnight.lisp
See the LICENSE file for licensing information. |
(cl:in-package #:chronicity)
(defclass repeater-fortnight (repeater)
((current-fortnight-start :initform nil)))
(defmethod r-next ((repeater repeater-fortnight) pointer)
(with-slots (current-fortnight-start now)
repeater
(let ((sunday (start-of-week now)))
(if (not current-fortnight-start)
(case pointer
(:future (setf current-fortnight-start (datetime-incr sunday :week)))
(:past (setf current-fortnight-start (datetime-decr sunday :week 2))))
(let ((amount (* 2 (if (eql pointer :future) 1 -1))))
(datetime-incf current-fortnight-start :week amount)))
(make-span current-fortnight-start
(datetime-incr current-fortnight-start :week 2)))))
(defmethod r-this ((repeater repeater-fortnight) pointer)
(with-slots (now)
repeater
(let ((sunday (start-of-week now)))
(case pointer
(:future (make-span now (datetime-incr sunday :week 2)))
(:past (make-span sunday now))
(:none (make-span sunday (datetime-incr sunday :week 2) nil now))))))
(defmethod r-offset ((repeater repeater-fortnight) span amount pointer)
(span+ span (* 2 amount (if (eql pointer :future) 1 -1)) :week))
(defmethod r-width ((repeater repeater-fortnight))
+fortnight-seconds+)
|
c23fe1610004e590f5301c85611141f0766e9f4058f3ef8df4547c08ef58592b | cosmos72/hyperluminal-mem | b+leaf.lisp | ;; -*- lisp -*-
;; This file is part of Hyperluminal-mem.
Copyright ( c ) 2013 - 2015
;;
;; This library is free software: you can redistribute it and/or
modify it under the terms of the Lisp Lesser General Public License
;; (), known as the LLGPL.
;;
;; This library is distributed in the hope that it will be useful,
;; but WITHOUT ANY WARRANTY; without even the implied warranty
;; of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
;; See the Lisp Lesser General Public License for more details.
(in-package :hyperluminal-mem-tree)
(defun b+leaf (&key (leaf t) size capacity contents contents-start contents-end)
(declare (type (or null b+size) size capacity contents-start contents-end)
(type (or null simple-vector) contents))
(the (values b+node &optional)
(b+node :leaf leaf
:size (and size (1+ size))
:capacity (and capacity (1+ capacity))
:contents contents
:contents-start (when (or contents contents-start)
(1- (or contents-start 0)))
:contents-end contents-end)))
(declaim (inline b+leaf-next))
(defun b+leaf-next (node)
(declare (type b+node node))
(let ((idx (b+size- (b+node-lo node) 1)))
(the (values (or null b+node))
(svref node idx))))
(declaim (inline (setf b+leaf-next)))
(defun (setf b+leaf-next) (value node)
(declare (type b+node node value))
(let ((idx (b+size- (b+node-lo node) 1)))
(setf (svref node idx) value)))
(declaim (inline b+leaf-find))
(defun b+leaf-find (node key &optional default)
(declare (type b+node node)
(type fixnum key))
(let ((lo (b+node-lo node))
(hi (b+node-hi node)))
;; (>= lo hi) means no keys, and leaves cannot have children,
;; much less a lone child without keys
(when (< lo hi)
(loop
;; lo, mid and hi point to keys and must always be even,
;; because odd positions contain values
:do
(let* ((mid (the b+size (logand -2 (ash (+ lo hi) -1))))
(kmid (the fixnum (b+node-ref node mid))))
(cond
((fixnum< key kmid) (setf hi mid))
((fixnum> key kmid) (setf lo mid))
(t (return-from b+leaf-find (values (b+node-ref node (1+ mid)) t)))))
:while (< (+ lo 2) hi))
(when (fixnum= key (b+node-ref node lo))
(return-from b+leaf-find (values (b+node-ref node (1+ lo)) t)))))
(values default nil))
| null | https://raw.githubusercontent.com/cosmos72/hyperluminal-mem/29c23361260e3a94fb1e09f3fdeab469b035504d/tree/b%2Bleaf.lisp | lisp | -*- lisp -*-
This file is part of Hyperluminal-mem.
This library is free software: you can redistribute it and/or
(), known as the LLGPL.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty
of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the Lisp Lesser General Public License for more details.
(>= lo hi) means no keys, and leaves cannot have children,
much less a lone child without keys
lo, mid and hi point to keys and must always be even,
because odd positions contain values |
Copyright ( c ) 2013 - 2015
modify it under the terms of the Lisp Lesser General Public License
(in-package :hyperluminal-mem-tree)
(defun b+leaf (&key (leaf t) size capacity contents contents-start contents-end)
(declare (type (or null b+size) size capacity contents-start contents-end)
(type (or null simple-vector) contents))
(the (values b+node &optional)
(b+node :leaf leaf
:size (and size (1+ size))
:capacity (and capacity (1+ capacity))
:contents contents
:contents-start (when (or contents contents-start)
(1- (or contents-start 0)))
:contents-end contents-end)))
(declaim (inline b+leaf-next))
(defun b+leaf-next (node)
(declare (type b+node node))
(let ((idx (b+size- (b+node-lo node) 1)))
(the (values (or null b+node))
(svref node idx))))
(declaim (inline (setf b+leaf-next)))
(defun (setf b+leaf-next) (value node)
(declare (type b+node node value))
(let ((idx (b+size- (b+node-lo node) 1)))
(setf (svref node idx) value)))
(declaim (inline b+leaf-find))
(defun b+leaf-find (node key &optional default)
(declare (type b+node node)
(type fixnum key))
(let ((lo (b+node-lo node))
(hi (b+node-hi node)))
(when (< lo hi)
(loop
:do
(let* ((mid (the b+size (logand -2 (ash (+ lo hi) -1))))
(kmid (the fixnum (b+node-ref node mid))))
(cond
((fixnum< key kmid) (setf hi mid))
((fixnum> key kmid) (setf lo mid))
(t (return-from b+leaf-find (values (b+node-ref node (1+ mid)) t)))))
:while (< (+ lo 2) hi))
(when (fixnum= key (b+node-ref node lo))
(return-from b+leaf-find (values (b+node-ref node (1+ lo)) t)))))
(values default nil))
|
3f40a506a886144a3d1c3581eb910c6be4bab513a462f45f091070f291e661dc | chetmurthy/utjson | ututil.ml | open Pa_ppx_utils.Std
(* borrowed from ounit *)
let failwithf fmt =
Fmt.kstrf failwith fmt
let raise_failwith loc s = Ploc.raise loc (Failure s)
let raise_failwithf loc fmt =
Fmt.kstrf (raise_failwith loc) fmt
module Stack = struct
let push l x = (l := x :: !l)
let pop l =
match !l with
h::tl -> l := tl
| [] -> invalid_arg "pop"
let top l = List.hd !l
let empty l = [] = !l
end
let mkdir_p s =
let p = Fpath.v s in
let rec mkrec p =
if p |> Bos.OS.Dir.exists |> Rresult.R.get_ok then ()
else begin
mkrec (Fpath.parent p) ;
ignore (Bos.OS.U.mkdir p 0o755)
end
in mkrec p
let rec traverse_json j p = match (j,p) with
(j,[]) -> j
| (`Assoc l, h::t) -> begin match List.assoc h l with
v -> traverse_json v t
| exception Not_found ->
Fmt.(failwithf "traverse_json: path %a was not valid for JSON@.%s@."
(list Dump.string) p
(Yojson.Basic.pretty_to_string j))
end
| (`List l, h::t) -> begin match int_of_string h with
n -> if n < List.length l then
traverse_json (List.nth l n) t
else Fmt.(failwith "traverse_json: path component %a was not an integer for JSON@.%s@."
Dump.string h
(Yojson.Basic.pretty_to_string j))
| exception Not_found ->
Fmt.(failwithf "traverse_json: path %a was not valid for JSON@.%s@."
(list Dump.string) p
(Yojson.Basic.pretty_to_string j))
end
| _ ->
Fmt.(failwithf "traverse_json: path %a was not valid for JSON@.%s@."
(list Dump.string) p
(Yojson.Basic.pretty_to_string j))
let key_compare (k1, _) (k2, _) = Stdlib.compare k1 k2
let canon_json (y : Yojson.Basic.t) : Yojson.Basic.t =
let rec yrec = function
`Null -> `Null
| `Bool b -> `Bool b
| `Float f -> `Float f
| `Int n -> `Float (float_of_int n)
| `String s -> `String s
| `List l -> `List (List.map yrec l)
| `Assoc l -> `Assoc (List.stable_sort key_compare (List.map (fun (k,v) -> (k,yrec v)) l))
in yrec y
let canon l =
l
|> List.sort_uniq Stdlib.compare
|> List.stable_sort Stdlib.compare
let slice n m l =
let alen = List.length l in
let n = match n with
None -> 0
| Some n ->
if n < 0 then
if -n > alen then 0 else alen - n
else
if n > alen then alen else n in
let m = match m with
None -> alen
| Some m ->
if m < 0 then
if -m > alen then alen else alen - m
else
if m > alen then alen else m in
let l = nthtail l n in
firstn (m-n) l
| null | https://raw.githubusercontent.com/chetmurthy/utjson/6e65ce058d6741b937888f51cd5f8f61f51a3936/ututil.ml | ocaml | borrowed from ounit | open Pa_ppx_utils.Std
let failwithf fmt =
Fmt.kstrf failwith fmt
let raise_failwith loc s = Ploc.raise loc (Failure s)
let raise_failwithf loc fmt =
Fmt.kstrf (raise_failwith loc) fmt
module Stack = struct
let push l x = (l := x :: !l)
let pop l =
match !l with
h::tl -> l := tl
| [] -> invalid_arg "pop"
let top l = List.hd !l
let empty l = [] = !l
end
let mkdir_p s =
let p = Fpath.v s in
let rec mkrec p =
if p |> Bos.OS.Dir.exists |> Rresult.R.get_ok then ()
else begin
mkrec (Fpath.parent p) ;
ignore (Bos.OS.U.mkdir p 0o755)
end
in mkrec p
let rec traverse_json j p = match (j,p) with
(j,[]) -> j
| (`Assoc l, h::t) -> begin match List.assoc h l with
v -> traverse_json v t
| exception Not_found ->
Fmt.(failwithf "traverse_json: path %a was not valid for JSON@.%s@."
(list Dump.string) p
(Yojson.Basic.pretty_to_string j))
end
| (`List l, h::t) -> begin match int_of_string h with
n -> if n < List.length l then
traverse_json (List.nth l n) t
else Fmt.(failwith "traverse_json: path component %a was not an integer for JSON@.%s@."
Dump.string h
(Yojson.Basic.pretty_to_string j))
| exception Not_found ->
Fmt.(failwithf "traverse_json: path %a was not valid for JSON@.%s@."
(list Dump.string) p
(Yojson.Basic.pretty_to_string j))
end
| _ ->
Fmt.(failwithf "traverse_json: path %a was not valid for JSON@.%s@."
(list Dump.string) p
(Yojson.Basic.pretty_to_string j))
let key_compare (k1, _) (k2, _) = Stdlib.compare k1 k2
let canon_json (y : Yojson.Basic.t) : Yojson.Basic.t =
let rec yrec = function
`Null -> `Null
| `Bool b -> `Bool b
| `Float f -> `Float f
| `Int n -> `Float (float_of_int n)
| `String s -> `String s
| `List l -> `List (List.map yrec l)
| `Assoc l -> `Assoc (List.stable_sort key_compare (List.map (fun (k,v) -> (k,yrec v)) l))
in yrec y
let canon l =
l
|> List.sort_uniq Stdlib.compare
|> List.stable_sort Stdlib.compare
let slice n m l =
let alen = List.length l in
let n = match n with
None -> 0
| Some n ->
if n < 0 then
if -n > alen then 0 else alen - n
else
if n > alen then alen else n in
let m = match m with
None -> alen
| Some m ->
if m < 0 then
if -m > alen then alen else alen - m
else
if m > alen then alen else m in
let l = nthtail l n in
firstn (m-n) l
|
46bc55ffe86b01f155bcd4c5e14924b052ab838ad4953e7829c9bf3cab26dde1 | mbenke/zpf2013 | DiaBadCat.hs | module DiaCat where
import Dialogue1
cat :: Dialogue
cat ~(Success : ~((Str userInput) : ~(Success : ~(r4 : _))))
= [ AppendChan stdout "enter filename\n",
ReadChan stdin,
AppendChan stdout name,
ReadFile name,
AppendChan stdout
(case r4 of
Str contents -> contents
Failure ioerr -> "can’t open file")
] where (name : _) = lines userInput
cat2 :: Dialogue
cat2 (r1 : (r2 : ~(r3 : ~(r4 : _))))
= AppendChan stdout "enter filename\n" : case r1 of
Success -> ReadChan stdin : case r2 of
(Str userInput) -> let (name:_) = lines userInput in [
AppendChan stdout name,
ReadFile name,
AppendChan stdout
(case r4 of
Str contents -> contents
Failure ioerr -> "can’t open file")
]
e2 -> error(show e2)
e1 -> error (show e1)
-- where (name : _) = lines userInput
main = runDialogue cat2 | null | https://raw.githubusercontent.com/mbenke/zpf2013/85f32747e17f07a74e1c3cb064b1d6acaca3f2f0/Code/IO/DiaBadCat.hs | haskell | where (name : _) = lines userInput | module DiaCat where
import Dialogue1
cat :: Dialogue
cat ~(Success : ~((Str userInput) : ~(Success : ~(r4 : _))))
= [ AppendChan stdout "enter filename\n",
ReadChan stdin,
AppendChan stdout name,
ReadFile name,
AppendChan stdout
(case r4 of
Str contents -> contents
Failure ioerr -> "can’t open file")
] where (name : _) = lines userInput
cat2 :: Dialogue
cat2 (r1 : (r2 : ~(r3 : ~(r4 : _))))
= AppendChan stdout "enter filename\n" : case r1 of
Success -> ReadChan stdin : case r2 of
(Str userInput) -> let (name:_) = lines userInput in [
AppendChan stdout name,
ReadFile name,
AppendChan stdout
(case r4 of
Str contents -> contents
Failure ioerr -> "can’t open file")
]
e2 -> error(show e2)
e1 -> error (show e1)
main = runDialogue cat2 |
7e5af03b4a0166a19ec1978bc7e722dbddfd6b881ea66f4c2d5e7fa067eebf27 | jaycfields/jry | set.clj | (ns jry.set)
(defn rel->hash-map [rel key-fn & {:keys [val-fn] :or {val-fn identity}}]
(reduce (fn [result e] (assoc result (key-fn e) (val-fn e))) {} rel))
(defn hash-map->rel [m kk vk]
(map (fn [[k v]] (hash-map kk k vk v)) m))
(defn transform [rel transform-fns]
(map #(reduce (fn [result [t-k t-fn]]
(update-in result [t-k] t-fn))
%
transform-fns)
rel))
(defn- ->combined-values [combine-fns xmap ymap]
(reduce (fn [result [c-k c-fn]]
(if (and (contains? xmap c-k) (contains? ymap c-k))
(assoc result c-k (c-fn (c-k xmap) (c-k ymap)))
result))
{}
combine-fns))
(defn combine [rel combine-fns]
(reduce (fn [xmap ymap]
(merge xmap ymap (->combined-values combine-fns xmap ymap)))
{}
rel))
| null | https://raw.githubusercontent.com/jaycfields/jry/d79cc8ec552c11122001bc1dd01b9ef6c251a9fb/src/clojure/jry/set.clj | clojure | (ns jry.set)
(defn rel->hash-map [rel key-fn & {:keys [val-fn] :or {val-fn identity}}]
(reduce (fn [result e] (assoc result (key-fn e) (val-fn e))) {} rel))
(defn hash-map->rel [m kk vk]
(map (fn [[k v]] (hash-map kk k vk v)) m))
(defn transform [rel transform-fns]
(map #(reduce (fn [result [t-k t-fn]]
(update-in result [t-k] t-fn))
%
transform-fns)
rel))
(defn- ->combined-values [combine-fns xmap ymap]
(reduce (fn [result [c-k c-fn]]
(if (and (contains? xmap c-k) (contains? ymap c-k))
(assoc result c-k (c-fn (c-k xmap) (c-k ymap)))
result))
{}
combine-fns))
(defn combine [rel combine-fns]
(reduce (fn [xmap ymap]
(merge xmap ymap (->combined-values combine-fns xmap ymap)))
{}
rel))
|
|
d2706ec14c24c94bd3cc7ae1e7dcc67a236d82d0891b23e3dda0bd1cabda4644 | zotonic/zotonic | search_query.erl | @author < >
2009 - 2023
@doc Handler for m.search[{query , .. } ]
Licensed under the Apache License , Version 2.0 ( the " License " ) ;
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% -2.0
%%
%% Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an " AS IS " BASIS ,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
-module(search_query).
-author("Arjan Scherpenisse <>").
%% interface functions
-export([
search/2,
parse_request_args/1,
parse_query_text/1,
build_query/2
]).
%% For testing
-export([
qterm/2,
expand_object_predicates/2
]).
-include_lib("zotonic_core/include/zotonic.hrl").
-define(SQL_SAFE_REGEXP, "^[0-9a-zA-Z_\.]+$").
%% @doc Build a SQL search query from the filter arguments.
-spec search( map() | proplists:proplist(), z:context() ) -> #search_sql_terms{} | #search_result{}.
search(Query, Context) ->
Query1 = filter_empty(Query),
Query2 = lists:filtermap(
fun
({K, V}) when is_binary(K) ->
case request_arg(K) of
undefined -> false;
A -> {true, {A, V}}
end;
({K, _} = KV) when is_atom(K) ->
{true, KV};
({{filter, _}, _} = KV) ->
{true, KV};
({{facet, _}, _} = KV) ->
{true, KV};
({{custom, _}, _} = KV) ->
{true, KV}
end,
Query1),
Query3 = lists:flatten(
lists:map(
fun
({K, #{ <<"all">> := All, <<"any">> := Any }}) ->
All1 = filter_empty( lists:map(fun(V) -> {K, V} end, All) ),
case lists:filter(fun z_utils:is_empty/1, Any) of
[] -> All1;
Any1 -> [ {K, Any1} | All1 ]
end;
({K, #{ <<"all">> := All }}) ->
filter_empty( lists:map(fun(V) -> {K, V} end, All) );
({K, #{ <<"any">> := Any }}) ->
case lists:filter(fun z_utils:is_empty/1, Any) of
[] -> [];
Any1 -> {K, Any1}
end;
(KV) ->
KV
end,
Query2)),
Query4 = case lists:flatten( proplists:get_all_values(cat, Query3) ) of
[] -> Query3;
Cats -> [{cat, Cats} | proplists:delete(cat, Query3)]
end,
Query5 = case lists:flatten( proplists:get_all_values(cat_exclude, Query4) ) of
[] -> Query4;
CatsX -> [{cat_exclude, CatsX} | proplists:delete(cat_exclude, Query4)]
end,
build_query(lists:sort(Query5), Context).
-spec build_query(list(), z:context()) -> #search_sql_terms{} | #search_result{}.
build_query(Terms, Context) ->
Ts = lists:flatten(lists:map(fun(T) -> qterm(T, Context) end, Terms)),
case lists:member(none, Ts) of
true ->
#search_result{};
false ->
#search_sql_terms{ terms = Ts }
end.
%% @doc Fetch all arguments from the query string in the HTTP request.
-spec qargs( z:context() ) -> list( {binary(), term()} ).
qargs(Context) ->
Args = z_context:get_q_all_noz(Context),
lists:filtermap(
fun
({<<"qargs">>, _}) -> false;
({<<"qs">>, V}) -> {true, {<<"text">>, V}};
({<<"q", Term/binary>>, V}) -> {true, {Term, V}};
(_) -> false
end,
Args).
-spec parse_request_args( list( {binary(), term()} ) ) -> list( {atom(), term()} ).
parse_request_args(Args) ->
parse_request_args(Args, []).
parse_request_args([], Acc) ->
Acc;
parse_request_args([{K,V}|Rest], Acc) when is_binary(K) ->
case z_context:is_zotonic_arg(K) of
true ->
parse_request_args(Rest, Acc);
false ->
case request_arg(K) of
undefined -> parse_request_args(Rest, Acc);
Arg -> parse_request_args(Rest, [{Arg, V}|Acc])
end
end;
parse_request_args([{K,V}|Rest], Acc) ->
parse_request_args([{z_convert:to_binary(K), V}|Rest], Acc).
@doc Parses a query text . Every line is an argument ; of which the first
%% '=' separates argument key from argument value.
-spec parse_query_text( binary() | string() | undefined ) -> list( {atom(), term()} ).
parse_query_text(undefined) ->
[];
parse_query_text(Text) when is_list(Text) ->
parse_query_text(list_to_binary(Text));
parse_query_text(Text) when is_binary(Text) ->
Lines = binary:split(Text, <<"\n">>, [global]),
KVs = [ split_arg(z_string:trim(Line)) || Line <- Lines],
Args = [ {request_arg(K), V} || {K,V} <- KVs, K =/= <<>> ],
[ {K,V} || {K,V} <- Args, K =/= undefined ].
split_arg(<<>>) ->
{<<>>, <<>>};
split_arg(B) ->
case binary:split(B, <<"=">>) of
[K,V] -> {z_string:trim(K), z_string:trim(V)};
[K] -> {z_string:trim(K), <<"true">>}
end.
Convert known request arguments to atoms .
request_arg(<<"content_group">>) -> content_group;
request_arg(<<"visible_for">>) -> visible_for;
request_arg(<<"cat">>) -> cat;
request_arg(<<"cat_exact">>) -> cat_exact;
request_arg(<<"cat_exclude">>) -> cat_exclude;
request_arg(<<"creator_id">>) -> creator_id;
request_arg(<<"modifier_id">>) -> modifier_id;
request_arg(<<"facet.", F/binary>>) -> {facet, F};
request_arg(<<"filter">>) -> filter;
request_arg(<<"filter.facet.", F/binary>>)-> {facet, F};
request_arg(<<"filter.", F/binary>>) -> {filter, F};
request_arg(<<"pivot.", _/binary>> = F)-> {filter, F};
request_arg(<<"pivot_", F/binary>>) -> {filter, <<"pivot.", F/binary>>};
request_arg(<<"id">>) -> id;
request_arg(<<"id_exclude">>) -> id_exclude;
request_arg(<<"hasobject">>) -> hasobject;
request_arg(<<"hasobjectpredicate">>) -> hasobjectpredicate;
request_arg(<<"hassubject">>) -> hassubject;
request_arg(<<"hassubjectpredicate">>) -> hassubjectpredicate;
request_arg(<<"hasanyobject">>) -> hasanyobject;
request_arg(<<"hasmedium">>) -> hasmedium;
request_arg(<<"is_authoritative">>) -> is_authoritative;
request_arg(<<"is_featured">>) -> is_featured;
request_arg(<<"is_published">>) -> is_published;
request_arg(<<"is_public">>) -> is_public;
request_arg(<<"is_findable">>) -> is_findable;
request_arg(<<"is_unfindable">>) -> is_unfindable;
request_arg(<<"date_start_after">>) -> date_start_after;
request_arg(<<"date_start_before">>) -> date_start_before;
request_arg(<<"date_start_year">>) -> date_start_year;
request_arg(<<"date_end_after">>) -> date_end_after;
request_arg(<<"date_end_before">>) -> date_end_before;
request_arg(<<"date_end_year">>) -> date_end_year;
request_arg(<<"publication_month">>) -> publication_month;
request_arg(<<"publication_year">>) -> publication_year;
request_arg(<<"publication_after">>) -> publication_after;
request_arg(<<"publication_before">>) -> publication_before;
request_arg(<<"qargs">>) -> qargs;
request_arg(<<"query_id">>) -> query_id;
request_arg(<<"rsc_id">>) -> rsc_id;
request_arg(<<"name">>) -> name;
request_arg(<<"language">>) -> language;
request_arg(<<"sort">>) -> sort;
request_arg(<<"asort">>) -> asort;
request_arg(<<"zsort">>) -> zsort;
request_arg(<<"text">>) -> text;
request_arg(<<"match_objects">>) -> match_objects;
request_arg(<<"match_object_ids">>) -> match_object_ids;
request_arg(<<"upcoming">>) -> upcoming;
request_arg(<<"ongoing">>) -> ongoing;
request_arg(<<"finished">>) -> finished;
request_arg(<<"unfinished">>) -> unfinished;
request_arg(<<"unfinished_or_nodate">>)-> unfinished_or_nodate;
% Skip these
request_arg(<<"page">>) -> undefined;
request_arg(<<"pagelen">>) -> undefined;
request_arg(<<"options">>) -> undefined;
% Complain about all else
request_arg(<<"custompivot">>) ->
?LOG_ERROR(#{
in => zotonic_mod_search,
text => <<"The query term 'custompivot' has been removed. Use filters with 'pivot.pivotname.field' instead.">>,
result => error,
reason => unknown_query_term,
term => <<"custompivot">>
}),
throw({error, {unknown_query_term, custompivot}});
request_arg(Term) ->
{custom, Term}.
%% Private methods start here
%% @doc Drop all empty query arguments. Search forms have empty values
%% for unused filters.
filter_empty(Q) when is_map(Q) ->
filter_empty(maps:to_list(Q));
filter_empty(Q) when is_list(Q) ->
lists:filter(fun({_, X}) -> not(empty_term(X)) end, Q).
empty_term([]) -> true;
empty_term(<<>>) -> true;
empty_term(undefined) -> true;
empty_term(null) -> true;
empty_term([X, _]) -> empty_term(X);
empty_term(_) -> false.
qterm(undefined, _Context) ->
[];
qterm([], _Context) ->
[];
qterm(Ts, Context) when is_list(Ts) ->
lists:map(fun(T) -> qterm(T, Context) end, Ts);
qterm({cat, Cats}, Context) ->
%% cat=categoryname
%% Filter results on a certain category.
Cats1 = assure_categories(Cats, Context),
Cats2 = add_or_append(<<"rsc " > > , Cats1 , [ ] ) ,
#search_sql_term{ cats = [ {<<"rsc">>, Cats1}] };
% parse_query(Rest, Context, Result#search_sql{cats=Cats2});
qterm({cat_exclude, Cats}, Context) ->
%% cat_exclude=categoryname
%% Filter results outside a certain category.
Cats1 = assure_categories(Cats, Context),
#search_sql_term{ cats_exclude = [ {<<"rsc">>, Cats1} ] };
qterm({cat_exact, Cats}, Context) ->
%% cat_exact=categoryname
%% Filter results excactly of a category (excluding subcategories)
Cats1 = assure_categories(Cats, Context),
#search_sql_term{ cats_exact = [ {<<"rsc">>, Cats1} ] };
qterm({content_group, ContentGroup}, Context) ->
%% content_group=id
Include only resources which are member of the given content group ( or one of its children )
Q = #search_sql_term{
extra = [ no_content_group_check ]
},
case rid(ContentGroup, Context) of
any ->
Q;
undefined ->
% Force an empty result
none;
CGId ->
case m_rsc:is_a(CGId, content_group, Context) of
true ->
List = m_hierarchy:contains(<<"content_group">>, ContentGroup, Context),
case m_rsc:p_no_acl(CGId, name, Context) of
<<"default_content_group">> ->
Q#search_sql_term{
where = [
<<"(rsc.content_group_id = any(">>, '$1',
<<"::int[]) or rsc.content_group_id is null)">>
],
args = [
List
]
};
_ ->
Q#search_sql_term{
where = [
<<"rsc.content_group_id = any(">>, '$1',
<<"::int[])">>
],
args = [
List
]
}
end;
false ->
Q#search_sql_term{
where = [
<<"rsc.content_group_id = ">>, '$1'
],
args = [
CGId
]
}
end
end;
qterm({visible_for, VisFor}, _Context) when is_list(VisFor) ->
%% visible_for=[5,6]
%% Filter results for visibility levels
try
VisFor1 = lists:map(fun z_convert:to_integer/1, VisFor),
VisFor2 = lists:filter(fun is_integer/1, VisFor1),
#search_sql_term{
where = [ <<"rsc.visible_for = any(">>, '$1', <<"::int[])">> ],
args = [ VisFor2 ]
}
catch
error:badarg ->
?LOG_WARNING(#{
in => zotonic_mod_search,
text => <<"Search: error converting visible_for search term">>,
result => error,
reason => badarg,
visible_for => VisFor
}),
[]
end;
qterm({visible_for, VisFor}, _Context) ->
%% visible_for=5
%% Filter results for a certain visibility level
try
case z_convert:to_integer(VisFor) of
undefined ->
[];
VisFor1 ->
#search_sql_term{
where = [ <<"rsc.visible_for = ">>, '$1'],
args = [ VisFor1 ]
}
end
catch
error:badarg ->
?LOG_WARNING(#{
in => zotonic_mod_search,
text => <<"Search: error converting visible_for search term">>,
result => error,
reason => badarg,
visible_for => VisFor
}),
[]
end;
qterm({id_exclude, Ids}, Context) when is_list(Ids) ->
%% id_exclude=resource-id
%% Exclude an id or multiple ids from the result
RscIds = lists:filtermap(
fun(Id) ->
case m_rsc:rid(Id, Context) of
undefined -> false;
RscId -> {true, RscId}
end
end,
Ids),
#search_sql_term{
where = [
<<"rsc.id <> any(">>, '$1', <<"::int[])">>
],
args = [ RscIds ]
};
qterm({id_exclude, Id}, Context) ->
case m_rsc:rid(Id, Context) of
undefined ->
[];
RscId ->
#search_sql_term{
where = [ <<"rsc.id <> ">>, '$1'],
args = [ RscId ]
}
end;
qterm({id, Ids}, Context) when is_list(Ids) ->
%% id=resource-id
%% Limit to an id or multiple ids
RscIds = lists:filtermap(
fun(Id) ->
case m_rsc:rid(Id, Context) of
undefined -> false;
RscId -> {true, RscId}
end
end,
Ids),
#search_sql_term{
where = [
<<"rsc.id = any(">>, '$1', <<"::int[])">>
],
args = [ RscIds ]
};
qterm({id, Id}, Context) ->
case m_rsc:rid(Id, Context) of
undefined ->
[];
RscId ->
#search_sql_term{
where = [ <<"rsc.id = ">>, '$1' ],
args = [ RscId ]
}
end;
qterm({hasmedium, HasMedium}, _Context) ->
%% hasmedium=true|false
%% Give all things which have a medium record attached (or not)
case z_convert:to_bool(HasMedium) of
true ->
#search_sql_term{
join_inner = #{
<<"medium">> => {<<"medium">>, <<"medium.id = rsc.id">>}
}
};
false ->
#search_sql_term{
join_left = #{
<<"medium">> => {<<"medium">>, <<"medium.id = rsc.id">>}
},
where = [
<<"medium.id is null ">>
]
}
end;
qterm({hassubject, Id}, Context) ->
parse_edges(hassubject, maybe_split_list(Id), Context);
qterm({hasobject, Id}, Context) ->
parse_edges(hasobject, maybe_split_list(Id), Context);
qterm({hasanyobject, ObjPreds}, Context) ->
%% hasanyobject=[[id,predicate]|id, ...]
%% Give all things which have an outgoing edge to Id with any of the given object/predicate combinations
OPs = expand_object_predicates(ObjPreds, Context),
% rsc.id in (select subject_id from edge where (object_id = ... and predicate_id = ... ) or (...) or ...)
Alias = edge_alias(),
OPClauses = [ object_predicate_clause(Alias, Obj, Pred) || {Obj, Pred} <- OPs ],
#search_sql_term{
where = [
"rsc.id in (select ", Alias ,".subject_id from edge ",Alias," where (",
lists:join(") or (", OPClauses),
"))"
]
};
qterm({hasobjectpredicate, Predicate}, Context) ->
%% hasobjectpredicate=predicate
%% Give all things which have any outgoing edge with given predicate
Alias = edge_alias(),
#search_sql_term{
tables = #{
Alias => <<"edge">>
},
where = [
Alias, <<".subject_id = rsc.id ">>,
<<" and ">>, Alias, <<".predicate_id = ">>, '$1'
],
args = [
predicate_to_id(Predicate, Context)
]
};
qterm({hassubjectpredicate, Predicate}, Context) ->
%% hassubjectpredicate=predicate
%% Give all things which have any incoming edge with given predicate
Alias = edge_alias(),
#search_sql_term{
tables = #{
Alias => <<"edge">>
},
where = [
Alias, <<".object_id = rsc.id ">>,
<<" and ">>, Alias, <<".predicate_id = ">>, '$1'
],
args = [
predicate_to_id(Predicate, Context)
]
};
qterm({is_featured, Boolean}, _Context) ->
%% is_featured or is_featured={false,true}
%% Filter on whether an item is featured or not.
#search_sql_term{
where = [
<<"rsc.is_featured = ">>, '$1'
],
args = [
z_convert:to_bool(Boolean)
]
};
qterm({is_published, Boolean}, _Context) ->
%% is_published or is_published={false,true,all}
%% Filter on whether an item is published or not.
case z_convert:to_binary(Boolean) of
<<"all">> ->
#search_sql_term{
extra = [ no_publish_check ]
};
_ ->
case z_convert:to_bool(Boolean) of
true ->
#search_sql_term{
extra = [ no_publish_check ],
where = [
<<"rsc.is_published = true and "
"rsc.publication_start <= now() and "
"rsc.publication_end >= now()">>
]
};
false ->
#search_sql_term{
extra = [ no_publish_check ],
where = [
<<"(rsc.is_published = false or "
"rsc.publication_start > now() or "
"rsc.publication_end < now())">>
]
}
end
end;
qterm({is_public, Boolean}, _Context) ->
    %% is_public or is_public={false,true,all}
    %% Filter on whether an item is publicly visible or not.
    %% TODO: Adapt this for the different ACL modules
    case z_convert:to_binary(Boolean) of
        <<"all">> ->
            % No restriction on visibility
            [];
        _ ->
            case z_convert:to_bool(Boolean) of
                true ->
                    #search_sql_term{
                        where = [
                            <<"rsc.visible_for = 0">>
                        ]
                    };
                false ->
                    #search_sql_term{
                        where = [
                            <<"rsc.visible_for > 0">>
                        ]
                    }
            end
    end;
qterm({is_findable, Boolean}, _Context) ->
%% is_findable or is_findable={false,true}
%% Filter on whether an item is findable or not.
#search_sql_term{
where = [
<<"rsc.is_unfindable = ">>, '$1'
],
args = [
not z_convert:to_bool(Boolean)
]
};
qterm({is_unfindable, Boolean}, _Context) ->
%% is_unfindable or is_unfindable={false,true}
%% Filter on whether an item is unfindable or not.
#search_sql_term{
where = [
<<"rsc.is_unfindable = ">>, '$1'
],
args = [
z_convert:to_bool(Boolean)
]
};
qterm({upcoming, Boolean}, _Context) ->
%% upcoming
%% Filter on items whose start date lies in the future
case z_convert:to_bool(Boolean) of
true ->
#search_sql_term{
where = [
<<"rsc.pivot_date_start >= current_timestamp">>
]
};
false ->
[]
end;
qterm({ongoing, Boolean}, _Context) ->
%% ongoing
%% Filter on items whose date range is around the current date
case z_convert:to_bool(Boolean) of
true ->
#search_sql_term{
where = [
<<"rsc.pivot_date_start <= current_timestamp ",
"and rsc.pivot_date_end >= current_timestamp">>
]
};
false ->
[]
end;
qterm({finished, Boolean}, _Context) ->
%% finished
%% Filter on items whose end date lies in the past
case z_convert:to_bool(Boolean) of
true ->
#search_sql_term{
where = [
<<"rsc.pivot_date_end < current_timestamp">>
]
};
false ->
[]
end;
qterm({unfinished, Boolean}, _Context) ->
    %% unfinished
    %% Filter on items whose end date lies in the future (not yet finished)
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"rsc.pivot_date_end >= current_timestamp">>
                ]
            };
        false ->
            []
    end;
qterm({unfinished_or_nodate, Boolean}, _Context) ->
    %% unfinished_or_nodate
    %% Filter on items whose end date lies in the future, or that have no
    %% date at all (NOTE(review): the SQL tests pivot_date_start for null,
    %% not pivot_date_end - confirm this is intentional)
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"(rsc.pivot_date_end >= current_date "
                      "or rsc.pivot_date_start is null)">>
                ]
            };
        false ->
            []
    end;
qterm({is_authoritative, Boolean}, _Context) ->
%% authoritative={true|false}
%% Filter on items which are authoritative or not
#search_sql_term{
where = [
<<"rsc.is_authoritative = ">>, '$1'
],
args = [
z_convert:to_bool(Boolean)
]
};
qterm({creator_id, Id}, Context) ->
%% creator_id=<rsc id>
%% Filter on items which are created by <rsc id>
#search_sql_term{
where = [
<<"rsc.creator_id = ">>, '$1'
],
args = [
m_rsc:rid(Id, Context)
]
};
qterm({modifier_id, Id}, Context) ->
%% modifier_id=<rsc id>
%% Filter on items which are last modified by <rsc id>
#search_sql_term{
where = [
<<"rsc.modifier_id = ">>, '$1'
],
args = [
m_rsc:rid(Id, Context)
]
};
qterm({qargs, Boolean}, Context) ->
%% qargs
%% Add all query terms from the current query arguments
case z_convert:to_bool(Boolean) of
true ->
Terms = parse_request_args(qargs(Context)),
qterm(Terms, Context);
false ->
[]
end;
qterm({query_id, Id}, Context) ->
%% query_id=<rsc id>
%% Get the query terms from given resource ID, and use those terms.
QArgs = try
parse_query_text(z_html:unescape(m_rsc:p(Id, 'query', Context)))
catch
throw:{error,{unknown_query_term,Term}}:S ->
?LOG_ERROR(#{
text => <<"Unknown query term in search query">>,
in => zotonic_mod_search,
result => error,
reason => unknown_query_term,
query_id => Id,
term => Term,
stack => S
}),
[]
end,
qterm(QArgs, Context);
qterm({rsc_id, Id}, Context) ->
%% rsc_id=<rsc id>
%% Filter to *only* include the given rsc id. Can be used for resource existence check.
#search_sql_term{
where = [
<<"rsc.id = ">>, '$1'
],
args = [
m_rsc:rid(Id, Context)
]
};
qterm({name, Name}, Context) ->
%% name=<name-pattern>
%% Filter on the unique name of a resource.
case z_string:to_lower(mod_search:trim(z_convert:to_binary(Name), Context)) of
All when All =:= <<>>; All =:= <<"*">>; All =:= <<"%">> ->
#search_sql_term{
where = [
<<"rsc.name is not null">>
]
};
Name1 ->
Name2 = binary:replace(Name1, <<"*">>, <<"%">>, [global]),
#search_sql_term{
where = [
<<"rsc.name like ">>, '$1'
],
args = [
Name2
]
}
end;
qterm({language, []}, _Context) ->
%% language=<iso-code>
%% Filter on the presence of a translation
[];
qterm({language, [ Lang | _ ] = Langs}, Context) when is_list(Lang) ->
lists:map(
fun(Code) ->
qterm({language, Code}, Context)
end,
Langs);
qterm({language, [ Lang | _ ] = Langs}, _Context) when is_atom(Lang); is_binary(Lang) ->
Langs1 = lists:map(
fun(Lng) ->
case z_language:to_language_atom(Lng) of
{ok, Code} ->
z_convert:to_binary(Code);
{error, _} ->
<<"x-none">>
end
end,
Langs),
#search_sql_term{
where = [
<<"rsc.language && ">>, '$1'
],
args = [ Langs1 ]
};
qterm({language, Lang}, _Context) ->
case z_language:to_language_atom(Lang) of
{ok, Code} ->
#search_sql_term{
where = [
<<"rsc.language @> ">>, '$1'
],
args = [
[ z_convert:to_binary(Code) ]
]
};
{error, _} ->
% Unknown iso code, ignore
[]
end;
qterm({sort, Sort}, _Context) ->
%% sort=fieldname
%% Order by a given field. Putting a '-' in front of the field name reverts the ordering.
sort_term(Sort);
qterm({asort, Sort}, _Context) ->
asort_term(Sort);
qterm({zsort, Sort}, _Context) ->
zsort_term(Sort);
qterm({{facet, Field}, <<"[", _/binary>> = V}, Context) ->
    %% facet.foo=value
    %% A value of the form "[a,b,...]" is a list of alternatives.
    %% (The pattern was <<"[", _>>, which only matches 2-byte binaries
    %% because a plain '_' segment defaults to 8 bits - list values were
    %% never split. Now consistent with add_filters/3.)
    V1 = maybe_split_list(V),
    qterm({{facet, Field}, V1}, Context);
qterm({{facet, Field}, V}, Context) ->
    %% facet.foo=value
    %% Add a join with the search_facet table.
    case search_facet:qterm(sql_safe(Field), V, Context) of
        {ok, Res1} ->
            Res1;
        {error, _} ->
            % Force an empty result
            none
    end;
qterm({filter, R}, Context) ->
    %% filter=[[column,operator,value], ...] or filter=[column,value]
    %% Add ad-hoc column filters to the query.
    add_filters(R, Context);
qterm({{filter, Field}, V}, Context) ->
    %% filter.column=value
    %% Filter on a single rsc/pivot/facet column. On a conversion error
    %% the term becomes 'none', forcing an empty result.
    {Tab, Alias, Col, Q1} = map_filter_column(Field, #search_sql_term{}),
    case pivot_qterm(Tab, Alias, Col, V, Q1, Context) of
        {ok, QTerm} ->
            QTerm;
        {error, _} ->
            none
    end;
qterm({text, Text}, Context) ->
    %% text=...
    %% Perform a fulltext search
    case mod_search:trim(z_convert:to_binary(Text), Context) of
        <<>> ->
            [];
        <<"id:", S/binary>> ->
            % "id:<n>" is a direct lookup of a single resource id.
            % (The "= " was missing, which rendered the invalid SQL
            % "rsc.id$1"; now consistent with the {id, Id} term.)
            #search_sql_term{
                where = [
                    <<"rsc.id = ">>, '$1'
                ],
                args = [
                    m_rsc:rid(S, Context)
                ]
            };
        _ ->
            TsQuery = mod_search:to_tsquery(Text, Context),
            #search_sql_term{
                where = [
                    '$1', <<"@@ rsc.pivot_tsv">>
                ],
                sort = [
                    [
                        "ts_rank_cd(", mod_search:rank_weight(Context),
                        ", rsc.pivot_tsv, ", '$1', ", ", '$2', ") desc"
                    ]
                ],
                args = [
                    TsQuery,
                    mod_search:rank_behaviour(Context)
                ]
            }
    end;
qterm({match_objects, RId}, Context) ->
    %% match_objects=<id>
    %% Match on the objects of the resource, best matching return first.
    %% Similar to the {match_objects id=...} query.
    case m_rsc:rid(RId, Context) of
        undefined ->
            % Unknown resource - force an empty result
            none;
        Id ->
            ObjectIds = m_edge:objects(Id, Context),
            qterm([
                {match_object_ids, ObjectIds},
                {id_exclude, Id}
            ], Context)
    end;
qterm({match_object_ids, ObjectIds}, Context) ->
    %% match_object_ids=[id, ...]
    %% Fulltext match on the "zpo<id>" object tokens in the reverse pivot
    %% tsv, ranking resources that share the most objects first.
    ObjectIds1 = [ m_rsc:rid(OId, Context) || OId <- lists:flatten(ObjectIds) ],
    MatchTerms = [ ["zpo",integer_to_list(ObjId)] || ObjId <- ObjectIds1, is_integer(ObjId) ],
    TsQuery = lists:flatten(lists:join("|", MatchTerms)),
    case TsQuery of
        [] ->
            % No valid object ids - force an empty result
            none;
        _ ->
            #search_sql_term{
                tables = #{
                    <<"matchquery">> => [ <<"to_tsquery(">>, '$1', <<")">> ]
                },
                where = [
                    <<"matchquery @@ rsc.pivot_rtsv">>
                ],
                sort = [
                    <<"ts_rank(rsc.pivot_rtsv, matchquery) desc">>
                ],
                args = [
                    TsQuery
                ]
            }
    end;
qterm({date_start_after, Date}, Context) ->
    %% date_start_after=date
    %% Filter on date_start after a specific date.
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_start >= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_start_before, Date}, Context) ->
    %% date_start_before=date
    %% Filter on date_start before a specific date.
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_start <= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_start_year, Year}, _Context) ->
    %% date_start_year=year
    %% Filter on year of start date.
    %% (The "= " was missing, rendering the invalid SQL
    %% "date_part(...) $1"; now consistent with date_end_year.)
    #search_sql_term{
        where = [
            <<"date_part('year', rsc.pivot_date_start) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Year)
        ]
    };
qterm({date_end_after, Date}, Context) ->
    %% date_end_after=date
    %% Filter on date_end after a specific date.
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_end >= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_end_before, Date}, Context) ->
    %% date_end_before=date
    %% Filter on date_end before a specific date.
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_end <= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_end_year, Year}, _Context) ->
    %% date_end_year=year
    %% Filter on year of end date
    #search_sql_term{
        where = [
            <<"date_part('year', rsc.pivot_date_end) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Year)
        ]
    };
qterm({publication_year, Year}, _Context) ->
    %% publication_year=year
    %% Filter on year of publication
    #search_sql_term{
        where = [
            <<"date_part('year', rsc.publication_start) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Year)
        ]
    };
qterm({publication_month, Month}, _Context) ->
    %% publication_month=month
    %% Filter on month of publication
    #search_sql_term{
        where = [
            <<"date_part('month', rsc.publication_start) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Month)
        ]
    };
qterm({publication_after, Date}, Context) ->
    %% publication_after=date
    %% Filter on publication start on or after a specific date.
    #search_sql_term{
        where = [
            <<"rsc.publication_start >= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({publication_before, Date}, Context) ->
    %% publication_before=date
    %% Filter on publication start on or before a specific date.
    #search_sql_term{
        where = [
            <<"rsc.publication_start <= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({{custom, Term}, Arg}, Context) ->
    %% Delegate an unknown term to other modules via a notification; a
    %% module may answer with a #search_sql_term{} (or [] for "ignore").
    case z_notifier:first(#search_query_term{ term = Term, arg = Arg }, Context) of
        undefined ->
            ?LOG_WARNING(#{
                in => zotonic_mod_search,
                text => <<"Ignored unknown query search term">>,
                term => Term,
                arg => Arg,
                result => error,
                reason => unknown_query_term
            }),
            [];
        [] ->
            [];
        #search_sql_term{} = SQL ->
            SQL
    end;
qterm(Term, _Context) ->
    %% No match found
    throw({error, {unknown_query_term, Term}}).
%%
%% Helper functions
%%
%% @doc Parse hassubject and hasobject edges.
-spec parse_edges(hassubject | hasobject, list(), z:context()) -> #search_sql_term{}.
parse_edges(Term, [[Id, Predicate]], Context) ->
    % Default the join alias to the "rsc" table.
    parse_edges(Term, [[Id, Predicate, "rsc"]], Context);
parse_edges(hassubject, [[Id, Predicate, JoinAlias]], Context) ->
Alias = edge_alias(),
JoinAlias1 = sql_safe(JoinAlias),
#search_sql_term{
tables = #{
Alias => <<"edge">>
},
where = [
Alias, <<".object_id = ">>, JoinAlias1, <<".id">>,
<<" and ">>, Alias, <<".subject_id = ">>, '$1',
<<" and ">>, Alias, <<".predicate_id = ">>, '$2'
],
args = [
m_rsc:rid(Id, Context),
predicate_to_id(Predicate, Context)
]
};
parse_edges(hassubject, [Id], Context) ->
Alias = edge_alias(),
#search_sql_term{
tables = #{
Alias => <<"edge">>
},
where = [
Alias, <<".object_id = rsc.id">>,
<<" and ">>, Alias, <<".subject_id = ">>,
'$1'
],
args = [
m_rsc:rid(Id, Context)
]
};
parse_edges(hasobject, [[Id, Predicate, JoinAlias]], Context) ->
Alias = edge_alias(),
JoinAlias1 = sql_safe(JoinAlias),
#search_sql_term{
tables = #{
Alias => <<"edge">>
},
where = [
Alias, <<".subject_id = ">>, JoinAlias1, <<".id">>,
<<" and ">>, Alias, <<".object_id = ">>, '$1',
<<" and ">>, Alias, <<".predicate_id = ">>, '$2'
],
args = [
m_rsc:rid(Id, Context),
predicate_to_id(Predicate, Context)
]
};
parse_edges(hasobject, [Id], Context) ->
Alias = edge_alias(),
#search_sql_term{
tables = #{
Alias => <<"edge">>
},
where = [
Alias, <<".subject_id = rsc.id">>,
<<" and ">>, Alias, <<".object_id = ">>,
'$1'
],
args = [
m_rsc:rid(Id, Context)
]
}.
%% Generate a unique alias for a joined edge table, so that multiple
%% edge joins within a single query do not clash.
edge_alias() ->
    Nr = z_ids:identifier(6),
    <<"edge_", Nr/binary>>.
%% Add a join on the hierarchy table.
% add_hierarchy_join(HierarchyName, Lft, Rght, Search) ->
%     {NameArg, Search1} = add_arg(HierarchyName, Search),
%     {LftArg, Search2} = add_arg(Lft, Search1),
%     {RghtArg, Search3} = add_arg(Rght, Search2),
%     A = "h" ++ integer_to_list(length(Search#search_sql.tables)),
%     Search4 = add_where(
%         A ++ ".name = " ++ NameArg ++ " AND "
%         ++ A ++ ".lft >= " ++ LftArg ++ " AND "
%         ++ A ++ ".rght <= " ++ RghtArg, Search3),
%     Search4#search_sql{
%         tables=Search1#search_sql.tables ++ [{hierarchy, A}],
%         from=Search1#search_sql.from ++ ", hierarchy " ++ A
%     }.
%% Map a sort specification to the zsort bucket of a #search_sql_term{}.
zsort_term(Sort) ->
    T = add_order(Sort, #search_sql_term{}),
    #search_sql_term{
        zsort = T#search_sql_term.sort
    }.
%% Map a sort specification to the asort bucket of a #search_sql_term{}.
asort_term(Sort) ->
    T = add_order(Sort, #search_sql_term{}),
    #search_sql_term{
        asort = T#search_sql_term.sort
    }.
%% Map a sort specification to the main sort of a #search_sql_term{}.
sort_term(Sort) ->
    add_order(Sort, #search_sql_term{}).
%% Add an ORDER clause.
%% A sort spec is a column name with an optional '-' (descending) or '+'
%% (ascending, the default) prefix. Special forms: "random", "seq" (edge
%% order) and "edge."/"pivot."/"facet." prefixed columns.
add_order(<<>>, Search) ->
    Search;
add_order([ Order | Os ], Search) when not is_integer(Order) ->
    % A list of sort specifications: apply them left to right.
    Search1 = add_order(Order, Search),
    add_order(Os, Search1);
add_order(Order, Search) when is_atom(Order) ->
    add_order(atom_to_binary(Order, utf8), Search);
add_order(Order, Search) when is_list(Order) ->
    add_order(list_to_binary(Order), Search);
add_order(<<_, "random">>, Search) ->
    % "<sign>random" - order randomly (the sign byte is ignored)
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ <<"random()">> ]
    };
add_order(<<C, "seq">>, Search) when C =:= $-; C =:= $+ ->
    % "<sign>seq" - order on the edge sequence number, then edge id
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ {edge, C, <<"seq">>}, {edge, C, <<"id">>} ]
    };
add_order(<<C, "edge.", Column/binary>>, Search) when C =:= $-; C =:= $+ ->
    % Sort on a column of the joined edge table.
    Column1 = sql_safe(Column),
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ {edge, C, Column1} ]
    };
add_order(<<C, "pivot.", Pivot/binary>>, Search) when C =:= $-; C =:= $+ ->
    % "pivot.<table>.<col>" sorts on a custom pivot table (inner joined);
    % "pivot.<col>" sorts on a pivot_* column of the rsc table itself.
    case binary:split(Pivot, <<".">>) of
        [ PivotTable, Column ] ->
            Tab1 = sql_safe(<<"pivot_", PivotTable/binary>>),
            Col1 = sql_safe(Column),
            Join = Search#search_sql_term.join_inner,
            Search#search_sql_term{
                join_inner = Join#{
                    Tab1 => {Tab1, <<Tab1/binary, ".id = rsc.id">>}
                },
                sort = Search#search_sql_term.sort
                    ++ [ {Tab1, C, Col1} ]
            };
        [ Column ] ->
            Col1 = <<"pivot_", Column/binary>>,
            Col2 = sql_safe(Col1),
            Search#search_sql_term{
                sort = Search#search_sql_term.sort
                    ++ [ {<<"rsc">>, C, Col2} ]
            }
    end;
add_order(<<C, "facet.", Column/binary>>, Search) when C =:= $-; C =:= $+ ->
    % Sort on a facet column; adds the search_facet join.
    Col1 = <<"f_", Column/binary>>,
    Col2 = sql_safe(Col1),
    Join = Search#search_sql_term.join_inner,
    Search#search_sql_term{
        join_inner = Join#{
            <<"facet">> => {<<"search_facet">>, <<"facet.id = rsc.id">>}
        },
        sort = Search#search_sql_term.sort
            ++ [ {<<"facet">>, C, Col2} ]
    };
add_order(<<C, Sort/binary>>, Search) when C =:= $-; C =:= $+ ->
    % Plain column sort, optionally prefixed with a table alias.
    Sort1 = sql_safe(Sort),
    [ Alias, Column ] = case binary:split(Sort1, <<".">>) of
        [ Col ] ->
            [ <<"rsc">>, Col ];
        [ _, _ ] = AC ->
            AC
    end,
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ {Alias, C, Column} ]
    };
add_order(Sort, Search) ->
    % No +/- prefix: default to ascending.
    add_order(<<"+", Sort/binary>>, Search).
%% Make sure that parts of the query are safe to append to the search query.
%% Returns the input unchanged when it matches ?SQL_SAFE_REGEXP, otherwise
%% throws {error, {unsafe_expression, String}}.
sql_safe(String) when is_list(String); is_binary(String) ->
    case re:run(String, ?SQL_SAFE_REGEXP) of
        {match, _} ->
            String;
        _ ->
            throw({error, {unsafe_expression, String}})
    end;
sql_safe(String) ->
    % Coerce non-string values (e.g. atoms) to a binary first.
    sql_safe(z_convert:to_binary(String)).
%% Make sure the input is a list of valid categories.
%% Unknown names resolve to '$error' entries so the caller can tell
%% "no category filter" apart from "only invalid categories".
assure_categories(Name, Context) ->
    Cats = assure_cats_list(Name),
    Cats1 = assure_cat_flatten(Cats),
    lists:foldl(fun(C, Acc) ->
                    case assure_category(C, Context) of
                        undefined -> Acc;
                        error -> ['$error'|Acc];
                        {ok, N} -> [N|Acc]
                    end
                end,
                [],
                Cats1).
%% Wrap a single category in a list; lists pass through unchanged.
assure_cats_list(Name) when not is_list(Name) ->
    [ Name ];
assure_cats_list(Names) ->
    Names.
%% Split strings with comma separated lists of categories.
%% Binary elements are split on "," and the result is flattened to a
%% single list; non-binary elements are kept as-is.
-spec assure_cat_flatten(any() | list()) -> list().
assure_cat_flatten(Names) ->
    Split = [ case Name of
                  Bin when is_binary(Bin) ->
                      binary:split(Bin, <<",">>, [ global ]);
                  Other ->
                      Other
              end || Name <- Names ],
    lists:flatten(Split).
%% Make sure the given name is a category.
assure_category(undefined, _) -> undefined;
assure_category(null, _) -> undefined;
assure_category("", _) -> undefined;
assure_category("*", _) -> undefined;
assure_category(<<>>, _) -> undefined;
assure_category(<<"*">>, _) -> undefined;
assure_category(<<$', _/binary>> = Name, Context) ->
case binary:last(Name) of
$' -> assure_category_1(z_string:trim(Name, $'), Context);
_ -> assure_category_1(Name, Context)
end;
assure_category(<<$", _/binary>> = Name, Context) ->
case binary:last(Name) of
$" -> assure_category_1(z_string:trim(Name, $"), Context);
_ -> assure_category_1(Name, Context)
end;
assure_category(Name, Context) ->
assure_category_1(Name, Context).
%% Resolve a category name or resource id to a category name.
%% Returns {ok, Name} or 'error' when the value is not a category.
assure_category_1(Name, Context) ->
    case m_category:name_to_id(Name, Context) of
        {ok, _Id} ->
            {ok, Name};
        _ ->
            case m_rsc:rid(Name, Context) of
                undefined ->
                    ?LOG_NOTICE(#{
                        text => <<"Query: unknown category">>,
                        in => zotonic_mod_search,
                        name => Name
                    }),
                    % display_error([ ?__("Unknown category", Context), 32, $", z_html:escape(z_convert:to_binary(Name)), $" ], Context),
                    error;
                CatId ->
                    case m_category:id_to_name(CatId, Context) of
                        undefined ->
                            ?LOG_NOTICE(#{
                                text => <<"Query: term is not a category">>,
                                in => zotonic_mod_search,
                                name => Name
                            }),
                            % display_error([ $", z_html:escape(z_convert:to_binary(Name)), $", 32, ?__("is not a category", Context) ], Context),
                            error;
                        Name1 ->
                            {ok, Name1}
                    end
            end
    end.
%% If the current user is an administrator or editor, show an error message about this search
% display_error(Msg, Context) ->
%     case z_acl:is_allowed(use, mod_admin, Context) of
%         true ->
%             ContextPruned = z_context:prune_for_async(Context),
%             z_session_page:add_script(z_render:growl_error(Msg, ContextPruned));
%         false ->
%             ok
%     end.
-spec pivot_qterm(Table, Alias, Column, Value, Q, Context) -> {ok, QResult} | {error, term()}
    when Table :: binary(),
         Alias :: binary(),
         Column :: binary(),
         Value :: term(),
         Q :: #search_sql_term{},
         QResult :: #search_sql_term{},
         Context :: z:context().
%% Add a comparison on a (pivot) column to the query term. A list of
%% values is OR-ed together; a single value becomes one comparison.
%% Values may carry an operator prefix like ">=10" (see extract_op/1).
pivot_qterm(_Tab, _Alias, _Col, [], Q, _Context) ->
    {ok, Q};
pivot_qterm(Tab, Alias, Col, [Value], Q, Context) ->
    pivot_qterm_1(Tab, Alias, Col, Value, Q, Context);
pivot_qterm(Tab, Alias, Col, Vs, Q, Context) when is_list(Vs) ->
    % 'OR' query for all values
    Q2 = lists:foldl(
        fun(V, QAcc) ->
            case pivot_qterm_1(Tab, Alias, Col, V, QAcc, Context) of
                {ok, QAcc1} ->
                    QAcc1;
                {error, _} ->
                    % Drop values that cannot be converted to the column type
                    QAcc
            end
        end,
        Q,
        Vs),
    % NOTE(review): this ORs every element of the accumulated where list;
    % assumes Q started out with an empty where - confirm at call sites.
    Q3 = Q2#search_sql_term{
        where = [
            <<"(">>,
            lists:join(<<" OR ">>, Q2#search_sql_term.where),
            <<")">>
        ]
    },
    {ok, Q3};
pivot_qterm(Tab, Alias, Col, Value, Q, Context) ->
    pivot_qterm_1(Tab, Alias, Col, Value, Q, Context).
%% Add a single "alias.col <op> $N" comparison for one value.
%% The value is converted to the column's type; conversion failures are
%% logged and returned as {error, Reason}.
pivot_qterm_1(Tab, Alias, Col, Value, Query, Context) ->
    {Op, Value1} = extract_op(Value),
    case z_db:to_column_value(Tab, Col, Value1, Context) of
        {ok, Value2} ->
            {ArgN, Query2} = add_term_arg(Value2, Query),
            W = [
                <<Alias/binary, $., Col/binary>>, Op, ArgN
            ],
            Query3 = Query2#search_sql_term{
                where = Query2#search_sql_term.where ++ [ W ]
            },
            {ok, Query3};
        {error, Reason} = Error ->
            ?LOG_WARNING(#{
                text => <<"Pivot value error, dropping query term.">>,
                in => zotonic_mod_search,
                result => error,
                reason => Reason,
                table => Tab,
                alias => Alias,
                column => Col,
                value => Value1
            }),
            Error
    end.
%% Append a positional argument to the term's args, returning its '$N'
%% placeholder atom and the updated term.
add_term_arg(ArgValue, #search_sql_term{ args = Args } = Q) ->
    Arg = [$$] ++ integer_to_list(length(Args) + 1),
    {list_to_atom(Arg), Q#search_sql_term{args = Args ++ [ ArgValue ]}}.
%% Split an optional comparison operator from the start of a filter value.
%% Two-character operators must be matched before their one-character
%% prefixes; previously "<" and ">" shadowed "<=", ">=" and "<>"
%% (e.g. <<"<=5">> was parsed as {"<", <<"=5">>}).
extract_op(<<"<=", V/binary>>) ->
    {"<=", V};
extract_op(<<">=", V/binary>>) ->
    {">=", V};
extract_op(<<"<>", V/binary>>) ->
    {"<>", V};
extract_op(<<"!=", V/binary>>) ->
    {"<>", V};
extract_op(<<"=", V/binary>>) ->
    {"=", V};
extract_op(<<">", V/binary>>) ->
    {">", V};
extract_op(<<"<", V/binary>>) ->
    {"<", V};
extract_op(V) ->
    % No operator prefix (or a non-binary value): default to equality.
    {"=", V}.
%% Entry point for the {filter, ...} query term: build the filter into a
%% fresh #search_sql_term{}.
add_filters(Filters, Context) ->
    add_filters(Filters, #search_sql_term{}, Context).
%% Add filters
%% Accepts a "[col,op,value]" list expression, a list of such filters
%% (OR-ed), an explicit {'or', Filters} tuple, or one [Column, Op, Value]
%% / [Column, Value] filter (the operator defaults to 'eq').
add_filters(<<"[", _/binary>> = Filter, Q, Context) ->
    add_filters(maybe_split_list(Filter), Q, Context);
add_filters([ [Column|_] | _ ] = Filters, Q, Context)
    when is_list(Column);
         is_binary(Column);
         is_atom(Column) ->
    add_filters_or(Filters, Q, Context);
add_filters({'or', Filters}, Q, Context) ->
    add_filters_or(Filters, Q, Context);
add_filters([Column, Value], R, Context) ->
    add_filters([Column, eq, Value], R, Context);
add_filters([Column, Operator, Value], Q, Context) ->
    {Tab, Alias, Col, Q1} = map_filter_column(Column, Q),
    case z_db:to_column_value(Tab, Col, Value, Context) of
        {ok, V1} ->
            {Expr, Q2} = create_filter(Tab, Alias, Col, Operator, V1, Q1),
            add_filter_where(Expr, Q2);
        {error, Reason} ->
            % Unconvertible value: log and leave the query unchanged.
            ?LOG_INFO(#{
                text => <<"Search query filter could not be added">>,
                result => error,
                reason => Reason,
                filter_column => Column,
                table => Tab,
                column => Col,
                value => Value
            }),
            Q
    end.
%% OR a list of filter expressions into a single parenthesized where clause.
add_filters_or(Filters, Q, Context) ->
    {Exprs, Q1} = lists:foldr(
        fun(V, Acc) ->
            add_filters_or_1(V, Acc, Context)
        end,
        {[], Q},
        Filters),
    Or = [ "(", lists:join(<<" or ">>, Exprs), ")" ],
    add_filter_where(Or, Q1).
%% Build one [Column, Op, Value] expression for the OR list; values that
%% cannot be converted to the column type are silently dropped.
add_filters_or_1([ C, O, V ], {Es, QAcc}, Context) ->
    {Tab, Alias, Col, QAcc1} = map_filter_column(C, QAcc),
    case z_db:to_column_value(Tab, Col, V, Context) of
        {ok, V1} ->
            {E, QAcc2} = create_filter(Tab, Alias, Col, O, V1, QAcc1),
            {[E|Es], QAcc2};
        {error, _} ->
            {Es, QAcc}
    end;
add_filters_or_1([ C, V ], {Es, QAcc}, Context) ->
    add_filters_or_1([ C, eq, V ], {Es, QAcc}, Context).
%% Build one "alias.col <op> $N" SQL expression for a filter value.
%% null/undefined values become IS NULL / IS NOT NULL tests.
create_filter(Tab, Alias, Col, Operator, null, Q) ->
    create_filter(Tab, Alias, Col, Operator, undefined, Q);
create_filter(_Tab, Alias, Col, Operator, undefined, Q) ->
    Operator1 = map_filter_operator(Operator),
    {create_filter_null(Alias, Col, Operator1), Q};
create_filter(_Tab, Alias, Col, Operator, Value, Q) ->
    {Arg, Q1} = add_filter_arg(Value, Q),
    Operator1 = map_filter_operator(Operator),
    {[Alias, $., Col, <<" ">>, Operator1, <<" ">>, Arg], Q1}.
%% Map a filter column name to {Table, Alias, Column, Q'}. Columns with a
%% "pivot." or "facet." prefix add the required inner join to the term.
map_filter_column(<<"pivot.", P/binary>>, #search_sql_term{ join_inner = Join } = Q) ->
    case binary:split(P, <<".">>) of
        [ Table, Field ] ->
            % "pivot.<table>.<col>" - a column of a custom pivot table
            T1 = sql_safe(Table),
            T2 = <<"pivot_", T1/binary>>,
            F1 = sql_safe(Field),
            Q1 = Q#search_sql_term{
                join_inner = Join#{
                    T2 => {T2, <<T2/binary, ".id = rsc.id">>}
                }
            },
            {T2, T2, F1, Q1};
        [ Field ] ->
            % "pivot.<col>" - a pivot_* column on the rsc table itself
            F1 = z_convert:to_binary(sql_safe(Field)),
            {<<"rsc">>, <<"rsc">>, <<"pivot_", F1/binary>>, Q}
    end;
map_filter_column(<<"facet.", P/binary>>, #search_sql_term{ join_inner = Join } = Q) ->
    % "facet.<col>" - an f_* column of the search_facet table
    Q1 = Q#search_sql_term{
        join_inner = Join#{
            <<"facet">> => {<<"search_facet">>, <<"facet.id = rsc.id">>}
        }
    },
    Field = sql_safe(P),
    {<<"search_facet">>, <<"facet">>, <<"f_", Field/binary>>, Q1};
map_filter_column(Column, Q) ->
    % Plain rsc column.
    Field = sql_safe(Column),
    {<<"rsc">>, <<"rsc">>, Field, Q}.
%% Add an AND clause to the WHERE of a #search_sql_term.
%% Clause is already supposed to be safe.
add_filter_where(Clause, #search_sql_term{ where = [] } = Q) ->
    Q#search_sql_term{ where = Clause };
add_filter_where(Clause, #search_sql_term{ where = C } = Q) ->
    Q#search_sql_term{ where = [ C, <<" and ">>, Clause ] }.
%% Append an argument to a #search_sql_term, returning its '$N'
%% placeholder atom and the updated term.
add_filter_arg(ArgValue, #search_sql_term{ args = Args } = Q) ->
    Arg = [$$] ++ integer_to_list(length(Args) + 1),
    {list_to_atom(Arg), Q#search_sql_term{args = Args ++ [ ArgValue ]}}.
%% Render an IS NULL / IS NOT NULL test for a null-valued filter.
%% Only equality and inequality make sense against null; any other
%% operator yields the always-false condition.
create_filter_null(Alias, Col, Op) when Op =:= "="; Op =:= "<>" ->
    Tail = case Op of
        "=" -> <<" is null">>;
        "<>" -> <<" is not null">>
    end,
    [ Alias, $., Col, Tail ];
create_filter_null(_Alias, _Col, _Op) ->
    "false".
%% Map a filter operator (atom, string or binary form) to its SQL text.
%% Throws {error, {unknown_filter_operator, Op}} for anything else.
map_filter_operator(eq) -> "=";
map_filter_operator('=') -> "=";
map_filter_operator(ne) -> "<>";
map_filter_operator('<>') -> "<>";
map_filter_operator(gt) -> ">";
map_filter_operator('>') -> ">";
map_filter_operator(lt) -> "<";
map_filter_operator('<') -> "<";
map_filter_operator(gte) -> ">=";
map_filter_operator('>=') -> ">=";
map_filter_operator(lte) -> "<=";
map_filter_operator('<=') -> "<=";
map_filter_operator("=") -> "=";
map_filter_operator("<>") -> "<>";
map_filter_operator(">") -> ">";
map_filter_operator("<") -> "<";
map_filter_operator(">=") -> ">=";
map_filter_operator("<=") -> "<=";
map_filter_operator(<<"=">>) -> "=";
map_filter_operator(<<"<>">>) -> "<>";
map_filter_operator(<<">">>) -> ">";
map_filter_operator(<<"<">>) -> "<";
map_filter_operator(<<">=">>) -> ">=";
map_filter_operator(<<"<=">>) -> "<=";
map_filter_operator(Op) -> throw({error, {unknown_filter_operator, Op}}).
% Convert an expression like [123,hasdocument]
%% Split a "[a,b,...]" list expression into its unquoted elements; any
%% other value is wrapped in a single-element list.
maybe_split_list(<<"[", _/binary>> = Term) ->
    unquote_all(search_parse_list:parse(Term));
maybe_split_list("[" ++ _ = Term) ->
    unquote_all(search_parse_list:parse(Term));
maybe_split_list(Other) ->
    [ Other ].
%% Recursively strip quotes from all (trimmed) binary elements of a
%% parsed list expression; other values pass through unchanged.
unquote_all(L) when is_list(L) ->
    lists:map(fun unquote_all/1, L);
unquote_all(B) when is_binary(B) ->
    unquot(z_string:trim(B));
unquote_all(T) ->
    T.
%% Strip the quote characters from a value that starts with ', " or `.
%% For binaries only the first quote in the tail is removed (enough for a
%% trailing quote); for strings every occurrence of the quote character
%% is dropped. Unquoted values pass through unchanged.
unquot(<<Q, Tail/binary>>) when Q =:= $'; Q =:= $"; Q =:= $` ->
    binary:replace(Tail, <<Q>>, <<>>);
unquot([Q | Tail]) when Q =:= $'; Q =:= $"; Q =:= $` ->
    lists:filter(fun(Ch) -> Ch =/= Q end, Tail);
unquot(Value) ->
    Value.
%% Expand the argument for hasanyobject, make pairs of {ObjectId, PredicateId}
expand_object_predicates(Bin, Context) when is_binary(Bin) ->
    map_rids(search_parse_list:parse(Bin), Context);
expand_object_predicates(OPs, Context) ->
    map_rids(OPs, Context).
%% Map a (list of) object/predicate specs to {ObjectId, PredicateId} pairs.
map_rids({rsc_list, L}, Context) ->
    map_rids(L, Context);
map_rids(L, Context) when is_list(L) ->
    [ map_rid(unquot(X),Context) || X <- L, X =/= <<>> ];
map_rids(Id, Context) ->
    map_rid(Id, Context).
%% Map one spec to a {ObjectId, PredicateId} pair; missing parts are 'any'.
map_rid([], _Context) -> {any, any};
map_rid([Obj,Pred|_], Context) -> {rid(Obj,Context),rid(Pred,Context)};
map_rid([Obj], Context) -> {rid(Obj, Context), any};
map_rid(Obj, Context) -> {rid(Obj, Context), any}.
%% Resolve a query argument to a resource id, the wildcard 'any', or
%% 'undefined'. Non-integer, non-wildcard values are looked up via m_rsc.
rid(undefined, _Context) ->
    undefined;
rid(Id, _Context) when is_integer(Id) ->
    Id;
rid(Wildcard, _Context) when Wildcard =:= <<"*">>; Wildcard =:= '*';
                             Wildcard =:= "*"; Wildcard =:= "";
                             Wildcard =:= <<>> ->
    any;
rid(Id, Context) ->
    m_rsc:rid(Id, Context).
%% Resolve a predicate name to its id; quoted names (string or binary)
%% have their surrounding quotes stripped before lookup.
predicate_to_id([$'|_] = Name, Context) ->
    case lists:last(Name) of
        $' -> predicate_to_id_1(z_string:trim(Name, $'), Context);
        _ -> predicate_to_id_1(Name, Context)
    end;
predicate_to_id([$"|_] = Name, Context) ->
    case lists:last(Name) of
        $" -> predicate_to_id_1(z_string:trim(Name, $"), Context);
        _ -> predicate_to_id_1(Name, Context)
    end;
predicate_to_id(<<$', _/binary>> = Name, Context) ->
    case binary:last(Name) of
        $' -> predicate_to_id_1(z_string:trim(Name, $'), Context);
        _ -> predicate_to_id_1(Name, Context)
    end;
predicate_to_id(<<$", _/binary>> = Name, Context) ->
    case binary:last(Name) of
        $" -> predicate_to_id_1(z_string:trim(Name, $"), Context);
        _ -> predicate_to_id_1(Name, Context)
    end;
predicate_to_id(Pred, Context) ->
    predicate_to_id_1(Pred, Context).
%% Resolve a predicate name to its id. Unknown predicates are logged and
%% mapped to 0, which matches no edges.
predicate_to_id_1(Pred, Context) ->
    case m_predicate:name_to_id(Pred, Context) of
        {ok, Id} ->
            Id;
        {error, _} ->
            ?LOG_NOTICE(#{
                text => <<"Query: unknown predicate">>,
                in => zotonic_mod_search,
                predicate => Pred
            }),
            % display_error([ ?__("Unknown predicate", Context), 32, $", z_html:escape(z_convert:to_binary(Pred)), $" ], Context),
            0
    end.
%% Support routine for "hasanyobject": build one SQL condition for an
%% {ObjectId, PredicateId} pair. 'undefined' on either side yields the
%% always-false condition; 'any' leaves that side unconstrained.
object_predicate_clause(_Alias, Object, Predicate)
    when Object =:= undefined; Predicate =:= undefined ->
    "false";
object_predicate_clause(Alias, any, any) ->
    [Alias, ".subject_id = rsc.id"];
object_predicate_clause(Alias, any, PredicateId) when is_integer(PredicateId) ->
    [Alias, ".predicate_id = ", integer_to_list(PredicateId)];
object_predicate_clause(Alias, ObjectId, any) when is_integer(ObjectId) ->
    [Alias, ".object_id = ", integer_to_list(ObjectId)];
object_predicate_clause(Alias, ObjectId, PredicateId)
    when is_integer(ObjectId), is_integer(PredicateId) ->
    [Alias, ".object_id=", integer_to_list(ObjectId),
     " and ", Alias, ".predicate_id=", integer_to_list(PredicateId)].
| null | https://raw.githubusercontent.com/zotonic/zotonic/f74d8ae093fc7d37601c55981c349d59c23f7d1e/apps/zotonic_mod_search/src/support/search_query.erl | erlang | you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
interface functions
For testing
@doc Build a SQL search query from the filter arguments.
@doc Fetch all arguments from the query string in the HTTP request.
'=' separates argument key from argument value.
Skip these
Complain about all else
Private methods start here
@doc Drop all empty query arguments. Search forms have empty values
for unused filters.
cat=categoryname
Filter results on a certain category.
parse_query(Rest, Context, Result#search_sql{cats=Cats2});
cat_exclude=categoryname
Filter results outside a certain category.
cat_exact=categoryname
Filter results excactly of a category (excluding subcategories)
content_group=id
Force an empty result
visible_for=[5,6]
Filter results for visibility levels
visible_for=5
Filter results for a certain visibility level
id_exclude=resource-id
Exclude an id or multiple ids from the result
id=resource-id
Limit to an id or multiple ids
hasmedium=true|false
Give all things which have a medium record attached (or not)
hasanyobject=[[id,predicate]|id, ...]
Give all things which have an outgoing edge to Id with any of the given object/predicate combinations
rsc.id in (select subject_id from edge where (object_id = ... and predicate_id = ... ) or (...) or ...)
hasobjectpredicate=predicate
Give all things which have any outgoing edge with given predicate
hassubjectpredicate=predicate
Give all things which have any incoming edge with given predicate
is_featured or is_featured={false,true}
Filter on whether an item is featured or not.
is_published or is_published={false,true,all}
Filter on whether an item is published or not.
Filter on whether an item is publicly visible or not.
is_findable or is_findable={false,true}
Filter on whether an item is findable or not.
is_unfindable or is_unfindable={false,true}
Filter on whether an item is unfindable or not.
upcoming
Filter on items whose start date lies in the future
ongoing
Filter on items whose date range is around the current date
finished
Filter on items whose end date lies in the past
Filter on items whose start date lies in the future
Filter on items whose start date lies in the future or don't have an end_date
authoritative={true|false}
Filter on items which are authoritative or not
creator_id=<rsc id>
Filter on items which are created by <rsc id>
modifier_id=<rsc id>
Filter on items which are last modified by <rsc id>
qargs
Add all query terms from the current query arguments
query_id=<rsc id>
Get the query terms from given resource ID, and use those terms.
rsc_id=<rsc id>
Filter to *only* include the given rsc id. Can be used for resource existence check.
name=<name-pattern>
Filter on the unique name of a resource.
language=<iso-code>
Filter on the presence of a translation
Unknown iso code, ignore
sort=fieldname
Order by a given field. Putting a '-' in front of the field name reverts the ordering.
facet.foo=value
Add a join with the search_facet table.
text=...
Perform a fulltext search
match_objects=<id>
Similar to the {match_objects id=...} query.
date_start_after=date
Filter on date_start after a specific date.
date_start_after=date
Filter on date_start before a specific date.
date_start_year=year
Filter on year of start date
date_end_after=date
Filter on date_end after a specific date.
date_end_after=date
Filter on date_end before a specific date.
publication_month=month
No match found
Helper functions
Add a join on the hierarchy table.
A = "h" ++ integer_to_list(length(Search#search_sql.tables)),
Search4 = add_where(
A ++ ".name = " ++ NameArg ++ " AND "
++ A ++ ".lft >= " ++ LftArg ++ " AND "
++ A ++ ".rght <= " ++ RghtArg, Search3),
Search4#search_sql{
tables=Search1#search_sql.tables ++ [{hierarchy, A}],
from=Search1#search_sql.from ++ ", hierarchy " ++ A
}.
Add an ORDER clause.
Make sure that parts of the query are safe to append to the search query.
Make sure the input is a list of valid categories.
Make a single category a list
Split strings with comma separated lists of categories
Make sure the given name is a category.
If the current user is an administrator or editor, show an error message about this search
display_error(Msg, Context) ->
true ->
false ->
ok
end.
'OR' query for all values
Add filters
Clause is already supposed to be safe.
Convert an expression like [123,hasdocument]
Support routine for "hasanyobject" | @author < >
2009 - 2023
@doc Handler for m.search[{query , .. } ]
Licensed under the Apache License , Version 2.0 ( the " License " ) ;
distributed under the License is distributed on an " AS IS " BASIS ,
-module(search_query).
-author("Arjan Scherpenisse <>").
-export([
search/2,
parse_request_args/1,
parse_query_text/1,
build_query/2
]).
-export([
qterm/2,
expand_object_predicates/2
]).
-include_lib("zotonic_core/include/zotonic.hrl").
-define(SQL_SAFE_REGEXP, "^[0-9a-zA-Z_\.]+$").
%% @doc Build a SQL search query from the filter arguments. Normalizes the
%% raw argument list/map into sorted {atom(), Value} terms, then delegates to
%% build_query/2. Unknown binary keys are dropped (request_arg/1 returns
%% 'undefined' for paging args); 'cat' and 'cat_exclude' values are collected
%% into a single term each.
-spec search( map() | proplists:proplist(), z:context() ) -> #search_sql_terms{} | #search_result{}.
search(Query, Context) ->
    % Drop empty filter values (search forms post empty strings for unused filters).
    Query1 = filter_empty(Query),
    % Map binary keys to their atom term names; keep already-normalized terms.
    Query2 = lists:filtermap(
        fun
            ({K, V}) when is_binary(K) ->
                case request_arg(K) of
                    undefined -> false;
                    A -> {true, {A, V}}
                end;
            ({K, _} = KV) when is_atom(K) ->
                {true, KV};
            ({{filter, _}, _} = KV) ->
                {true, KV};
            ({{facet, _}, _} = KV) ->
                {true, KV};
            ({{custom, _}, _} = KV) ->
                {true, KV}
        end,
        Query1),
    % Expand #{ <<"all">> => [..], <<"any">> => [..] } values: every "all"
    % entry becomes its own term, the "any" list stays one term.
    % NOTE(review): the "any" lists are passed through lists:filter with
    % z_utils:is_empty/1, which KEEPS the empty values — looks inverted
    % (expected: keep the non-empty ones); confirm against z_utils docs.
    Query3 = lists:flatten(
        lists:map(
            fun
                ({K, #{ <<"all">> := All, <<"any">> := Any }}) ->
                    All1 = filter_empty( lists:map(fun(V) -> {K, V} end, All) ),
                    case lists:filter(fun z_utils:is_empty/1, Any) of
                        [] -> All1;
                        Any1 -> [ {K, Any1} | All1 ]
                    end;
                ({K, #{ <<"all">> := All }}) ->
                    filter_empty( lists:map(fun(V) -> {K, V} end, All) );
                ({K, #{ <<"any">> := Any }}) ->
                    case lists:filter(fun z_utils:is_empty/1, Any) of
                        [] -> [];
                        Any1 -> {K, Any1}
                    end;
                (KV) ->
                    KV
            end,
            Query2)),
    % Merge all 'cat' terms into one, likewise 'cat_exclude'.
    Query4 = case lists:flatten( proplists:get_all_values(cat, Query3) ) of
        [] -> Query3;
        Cats -> [{cat, Cats} | proplists:delete(cat, Query3)]
    end,
    Query5 = case lists:flatten( proplists:get_all_values(cat_exclude, Query4) ) of
        [] -> Query4;
        CatsX -> [{cat_exclude, CatsX} | proplists:delete(cat_exclude, Query4)]
    end,
    build_query(lists:sort(Query5), Context).
%% @doc Map every normalized term to its #search_sql_term{} parts. When any
%% term evaluated to 'none' (a term that can never match, e.g. an unknown
%% content group) the whole search short-circuits to an empty result.
-spec build_query(list(), z:context()) -> #search_sql_terms{} | #search_result{}.
build_query(Terms, Context) ->
    TermParts = lists:flatten([ qterm(T, Context) || T <- Terms ]),
    case lists:member(none, TermParts) of
        true -> #search_result{};
        false -> #search_sql_terms{ terms = TermParts }
    end.
%% @doc Fetch all search arguments from the query string of the HTTP request.
%% Only "q"-prefixed arguments are used: "qs" maps to the text term, any other
%% "qXxx" maps to argument "Xxx". The "qargs" argument itself is skipped to
%% avoid recursion.
-spec qargs( z:context() ) -> list( {binary(), term()} ).
qargs(Context) ->
    Args = z_context:get_q_all_noz(Context),
    lists:filtermap(
        fun
            ({<<"qargs">>, _}) -> false;
            ({<<"qs">>, V}) -> {true, {<<"text">>, V}};
            ({<<"q", Term/binary>>, V}) -> {true, {Term, V}};
            (_) -> false
        end,
        Args).
%% @doc Turn raw {binary key, value} request arguments into {atom term, value}
%% pairs. Zotonic-internal arguments (z_msg, etc.) and unknown paging args are
%% skipped; non-binary keys are first converted to binaries.
-spec parse_request_args( list( {binary(), term()} ) ) -> list( {atom(), term()} ).
parse_request_args(Args) ->
    parse_request_args(Args, []).

parse_request_args([], Acc) ->
    Acc;
parse_request_args([{K,V}|Rest], Acc) when is_binary(K) ->
    case z_context:is_zotonic_arg(K) of
        true ->
            parse_request_args(Rest, Acc);
        false ->
            case request_arg(K) of
                undefined -> parse_request_args(Rest, Acc);
                Arg -> parse_request_args(Rest, [{Arg, V}|Acc])
            end
    end;
parse_request_args([{K,V}|Rest], Acc) ->
    % Non-binary key: normalize and retry the same pair.
    parse_request_args([{z_convert:to_binary(K), V}|Rest], Acc).
%% @doc Parses a query text. Every line is an argument; the first '='
%% separates argument key from argument value. Lines without a key and
%% unknown argument names are dropped.
-spec parse_query_text( binary() | string() | undefined ) -> list( {atom(), term()} ).
parse_query_text(undefined) ->
    [];
parse_query_text(Text) when is_list(Text) ->
    parse_query_text(list_to_binary(Text));
parse_query_text(Text) when is_binary(Text) ->
    Lines = binary:split(Text, <<"\n">>, [global]),
    KVs = [ split_arg(z_string:trim(Line)) || Line <- Lines],
    Args = [ {request_arg(K), V} || {K,V} <- KVs, K =/= <<>> ],
    [ {K,V} || {K,V} <- Args, K =/= undefined ].
%% @doc Split one query-text line into {Key, Value} on the first '='.
%% A line without '=' is a boolean flag and defaults its value to <<"true">>.
split_arg(<<>>) ->
    {<<>>, <<>>};
split_arg(B) ->
    case binary:split(B, <<"=">>) of
        [K,V] -> {z_string:trim(K), z_string:trim(V)};
        [K] -> {z_string:trim(K), <<"true">>}
    end.
%% Convert known request arguments to atoms.
%% Returns 'undefined' for paging/options arguments that are handled
%% elsewhere; unknown names become {custom, Term} and are resolved via a
%% #search_query_term{} notification. The removed 'custompivot' term throws.
request_arg(<<"content_group">>) -> content_group;
request_arg(<<"visible_for">>) -> visible_for;
request_arg(<<"cat">>) -> cat;
request_arg(<<"cat_exact">>) -> cat_exact;
request_arg(<<"cat_exclude">>) -> cat_exclude;
request_arg(<<"creator_id">>) -> creator_id;
request_arg(<<"modifier_id">>) -> modifier_id;
request_arg(<<"facet.", F/binary>>) -> {facet, F};
request_arg(<<"filter">>) -> filter;
request_arg(<<"filter.facet.", F/binary>>)-> {facet, F};
request_arg(<<"filter.", F/binary>>) -> {filter, F};
request_arg(<<"pivot.", _/binary>> = F)-> {filter, F};
request_arg(<<"pivot_", F/binary>>) -> {filter, <<"pivot.", F/binary>>};
request_arg(<<"id">>) -> id;
request_arg(<<"id_exclude">>) -> id_exclude;
request_arg(<<"hasobject">>) -> hasobject;
request_arg(<<"hasobjectpredicate">>) -> hasobjectpredicate;
request_arg(<<"hassubject">>) -> hassubject;
request_arg(<<"hassubjectpredicate">>) -> hassubjectpredicate;
request_arg(<<"hasanyobject">>) -> hasanyobject;
request_arg(<<"hasmedium">>) -> hasmedium;
request_arg(<<"is_authoritative">>) -> is_authoritative;
request_arg(<<"is_featured">>) -> is_featured;
request_arg(<<"is_published">>) -> is_published;
request_arg(<<"is_public">>) -> is_public;
request_arg(<<"is_findable">>) -> is_findable;
request_arg(<<"is_unfindable">>) -> is_unfindable;
request_arg(<<"date_start_after">>) -> date_start_after;
request_arg(<<"date_start_before">>) -> date_start_before;
request_arg(<<"date_start_year">>) -> date_start_year;
request_arg(<<"date_end_after">>) -> date_end_after;
request_arg(<<"date_end_before">>) -> date_end_before;
request_arg(<<"date_end_year">>) -> date_end_year;
request_arg(<<"publication_month">>) -> publication_month;
request_arg(<<"publication_year">>) -> publication_year;
request_arg(<<"publication_after">>) -> publication_after;
request_arg(<<"publication_before">>) -> publication_before;
request_arg(<<"qargs">>) -> qargs;
request_arg(<<"query_id">>) -> query_id;
request_arg(<<"rsc_id">>) -> rsc_id;
request_arg(<<"name">>) -> name;
request_arg(<<"language">>) -> language;
request_arg(<<"sort">>) -> sort;
request_arg(<<"asort">>) -> asort;
request_arg(<<"zsort">>) -> zsort;
request_arg(<<"text">>) -> text;
request_arg(<<"match_objects">>) -> match_objects;
request_arg(<<"match_object_ids">>) -> match_object_ids;
request_arg(<<"upcoming">>) -> upcoming;
request_arg(<<"ongoing">>) -> ongoing;
request_arg(<<"finished">>) -> finished;
request_arg(<<"unfinished">>) -> unfinished;
request_arg(<<"unfinished_or_nodate">>)-> unfinished_or_nodate;
% Paging and option arguments are handled outside of the term builder.
request_arg(<<"page">>) -> undefined;
request_arg(<<"pagelen">>) -> undefined;
request_arg(<<"options">>) -> undefined;
request_arg(<<"custompivot">>) ->
    ?LOG_ERROR(#{
        in => zotonic_mod_search,
        text => <<"The query term 'custompivot' has been removed. Use filters with 'pivot.pivotname.field' instead.">>,
        result => error,
        reason => unknown_query_term,
        term => <<"custompivot">>
    }),
    throw({error, {unknown_query_term, custompivot}});
request_arg(Term) ->
    {custom, Term}.
%% @doc Drop all query arguments with an empty value. Search forms post empty
%% strings for unused filters; those must not become query terms.
filter_empty(Q) when is_map(Q) ->
    filter_empty(maps:to_list(Q));
filter_empty(Q) when is_list(Q) ->
    lists:filter(fun({_, X}) -> not(empty_term(X)) end, Q).
%% @doc True when a query argument value counts as "empty" and should be
%% dropped. A two-element list is judged by its first element (e.g. the
%% [Value, Label] form posted by some form widgets).
empty_term([First, _]) ->
    empty_term(First);
empty_term(Term) ->
    lists:member(Term, [[], <<>>, undefined, null]).
%% @doc Map a single normalized query term to a #search_sql_term{} (or a list
%% of them). Returns [] for no-op terms and 'none' for terms that can never
%% match (forcing an empty search result in build_query/2).
qterm(undefined, _Context) ->
    [];
qterm([], _Context) ->
    [];
qterm(Ts, Context) when is_list(Ts) ->
    lists:map(fun(T) -> qterm(T, Context) end, Ts);
qterm({cat, Cats}, Context) ->
    %% cat=categoryname
    %% Filter results on a certain category.
    Cats1 = assure_categories(Cats, Context),
    %% Cats2 = add_or_append(<<"rsc">>, Cats1, []),
    #search_sql_term{ cats = [ {<<"rsc">>, Cats1}] };
qterm({cat_exclude, Cats}, Context) ->
    %% cat_exclude=categoryname
    %% Filter results outside a certain category.
    Cats1 = assure_categories(Cats, Context),
    #search_sql_term{ cats_exclude = [ {<<"rsc">>, Cats1} ] };
qterm({cat_exact, Cats}, Context) ->
    %% cat_exact=categoryname
    %% Filter results exactly of a category (excluding subcategories)
    Cats1 = assure_categories(Cats, Context),
    #search_sql_term{ cats_exact = [ {<<"rsc">>, Cats1} ] };
qterm({content_group, ContentGroup}, Context) ->
    %% content_group=id
    %% Include only resources which are member of the given content group
    %% (or one of its children)
    Q = #search_sql_term{
        extra = [ no_content_group_check ]
    },
    case rid(ContentGroup, Context) of
        any ->
            Q;
        undefined ->
            % Unknown content group: force an empty result.
            none;
        CGId ->
            case m_rsc:is_a(CGId, content_group, Context) of
                true ->
                    List = m_hierarchy:contains(<<"content_group">>, ContentGroup, Context),
                    case m_rsc:p_no_acl(CGId, name, Context) of
                        <<"default_content_group">> ->
                            % The default group also matches resources without a group.
                            Q#search_sql_term{
                                where = [
                                    <<"(rsc.content_group_id = any(">>, '$1',
                                    <<"::int[]) or rsc.content_group_id is null)">>
                                ],
                                args = [
                                    List
                                ]
                            };
                        _ ->
                            Q#search_sql_term{
                                where = [
                                    <<"rsc.content_group_id = any(">>, '$1',
                                    <<"::int[])">>
                                ],
                                args = [
                                    List
                                ]
                            }
                    end;
                false ->
                    Q#search_sql_term{
                        where = [
                            <<"rsc.content_group_id = ">>, '$1'
                        ],
                        args = [
                            CGId
                        ]
                    }
            end
    end;
qterm({visible_for, VisFor}, _Context) when is_list(VisFor) ->
    %% visible_for=[5,6]
    %% Filter results on a list of visibility levels
    try
        VisFor1 = lists:map(fun z_convert:to_integer/1, VisFor),
        VisFor2 = lists:filter(fun is_integer/1, VisFor1),
        #search_sql_term{
            where = [ <<"rsc.visible_for = any(">>, '$1', <<"::int[])">> ],
            args = [ VisFor2 ]
        }
    catch
        error:badarg ->
            ?LOG_WARNING(#{
                in => zotonic_mod_search,
                text => <<"Search: error converting visible_for search term">>,
                result => error,
                reason => badarg,
                visible_for => VisFor
            }),
            []
    end;
qterm({visible_for, VisFor}, _Context) ->
    %% visible_for=5
    %% Filter results for a certain visibility level
    try
        case z_convert:to_integer(VisFor) of
            undefined ->
                [];
            VisFor1 ->
                #search_sql_term{
                    where = [ <<"rsc.visible_for = ">>, '$1'],
                    args = [ VisFor1 ]
                }
        end
    catch
        error:badarg ->
            ?LOG_WARNING(#{
                in => zotonic_mod_search,
                text => <<"Search: error converting visible_for search term">>,
                result => error,
                reason => badarg,
                visible_for => VisFor
            }),
            []
    end;
qterm({id_exclude, Ids}, Context) when is_list(Ids) ->
    %% id_exclude=resource-id
    %% Exclude an id or multiple ids from the result
    RscIds = lists:filtermap(
        fun(Id) ->
            case m_rsc:rid(Id, Context) of
                undefined -> false;
                RscId -> {true, RscId}
            end
        end,
        Ids),
    #search_sql_term{
        where = [
            <<"rsc.id <> any(">>, '$1', <<"::int[])">>
        ],
        args = [ RscIds ]
    };
qterm({id_exclude, Id}, Context) ->
    case m_rsc:rid(Id, Context) of
        undefined ->
            [];
        RscId ->
            #search_sql_term{
                where = [ <<"rsc.id <> ">>, '$1'],
                args = [ RscId ]
            }
    end;
qterm({id, Ids}, Context) when is_list(Ids) ->
    %% id=resource-id
    %% Limit to an id or multiple ids
    RscIds = lists:filtermap(
        fun(Id) ->
            case m_rsc:rid(Id, Context) of
                undefined -> false;
                RscId -> {true, RscId}
            end
        end,
        Ids),
    #search_sql_term{
        where = [
            <<"rsc.id = any(">>, '$1', <<"::int[])">>
        ],
        args = [ RscIds ]
    };
qterm({id, Id}, Context) ->
    case m_rsc:rid(Id, Context) of
        undefined ->
            [];
        RscId ->
            #search_sql_term{
                where = [ <<"rsc.id = ">>, '$1' ],
                args = [ RscId ]
            }
    end;
qterm({hasmedium, HasMedium}, _Context) ->
    %% hasmedium=true|false
    %% Give all things which have a medium record attached (or not)
    case z_convert:to_bool(HasMedium) of
        true ->
            #search_sql_term{
                join_inner = #{
                    <<"medium">> => {<<"medium">>, <<"medium.id = rsc.id">>}
                }
            };
        false ->
            #search_sql_term{
                join_left = #{
                    <<"medium">> => {<<"medium">>, <<"medium.id = rsc.id">>}
                },
                where = [
                    <<"medium.id is null ">>
                ]
            }
    end;
qterm({hassubject, Id}, Context) ->
    parse_edges(hassubject, maybe_split_list(Id), Context);
qterm({hasobject, Id}, Context) ->
    parse_edges(hasobject, maybe_split_list(Id), Context);
qterm({hasanyobject, ObjPreds}, Context) ->
    %% hasanyobject=[[id,predicate]|id, ...]
    %% Give all things which have an outgoing edge to Id with any of the
    %% given object/predicate combinations.
    OPs = expand_object_predicates(ObjPreds, Context),
    Alias = edge_alias(),
    OPClauses = [ object_predicate_clause(Alias, Obj, Pred) || {Obj, Pred} <- OPs ],
    #search_sql_term{
        where = [
            "rsc.id in (select ", Alias ,".subject_id from edge ",Alias," where (",
                lists:join(") or (", OPClauses),
            "))"
        ]
    };
qterm({hasobjectpredicate, Predicate}, Context) ->
    %% hasobjectpredicate=predicate
    %% Give all things which have any outgoing edge with given predicate
    Alias = edge_alias(),
    #search_sql_term{
        tables = #{
            Alias => <<"edge">>
        },
        where = [
            Alias, <<".subject_id = rsc.id ">>,
            <<" and ">>, Alias, <<".predicate_id = ">>, '$1'
        ],
        args = [
            predicate_to_id(Predicate, Context)
        ]
    };
qterm({hassubjectpredicate, Predicate}, Context) ->
    %% hassubjectpredicate=predicate
    %% Give all things which have any incoming edge with given predicate
    Alias = edge_alias(),
    #search_sql_term{
        tables = #{
            Alias => <<"edge">>
        },
        where = [
            Alias, <<".object_id = rsc.id ">>,
            <<" and ">>, Alias, <<".predicate_id = ">>, '$1'
        ],
        args = [
            predicate_to_id(Predicate, Context)
        ]
    };
qterm({is_featured, Boolean}, _Context) ->
    %% is_featured or is_featured={false,true}
    #search_sql_term{
        where = [
            <<"rsc.is_featured = ">>, '$1'
        ],
        args = [
            z_convert:to_bool(Boolean)
        ]
    };
qterm({is_published, Boolean}, _Context) ->
    %% is_published or is_published={false,true,all}
    case z_convert:to_binary(Boolean) of
        <<"all">> ->
            #search_sql_term{
                extra = [ no_publish_check ]
            };
        _ ->
            case z_convert:to_bool(Boolean) of
                true ->
                    #search_sql_term{
                        extra = [ no_publish_check ],
                        where = [
                            <<"rsc.is_published = true and "
                              "rsc.publication_start <= now() and "
                              "rsc.publication_end >= now()">>
                        ]
                    };
                false ->
                    #search_sql_term{
                        extra = [ no_publish_check ],
                        where = [
                            <<"(rsc.is_published = false or "
                              "rsc.publication_start > now() or "
                              "rsc.publication_end < now())">>
                        ]
                    }
            end
    end;
qterm({is_public, Boolean}, _Context) ->
    %% is_public or is_public={false, true, all}
    %% TODO: Adapt this for the different ACL modules
    case z_convert:to_binary(Boolean) of
        <<"all">> ->
            [];
        _ ->
            case z_convert:to_bool(Boolean) of
                true ->
                    #search_sql_term{
                        where = [
                            <<"rsc.visible_for = 0">>
                        ]
                    };
                false ->
                    #search_sql_term{
                        where = [
                            <<"rsc.visible_for > 0">>
                        ]
                    }
            end
    end;
qterm({is_findable, Boolean}, _Context) ->
    %% is_findable or is_findable={false,true}
    %% findable == not unfindable
    #search_sql_term{
        where = [
            <<"rsc.is_unfindable = ">>, '$1'
        ],
        args = [
            not z_convert:to_bool(Boolean)
        ]
    };
qterm({is_unfindable, Boolean}, _Context) ->
    %% is_unfindable or is_unfindable={false,true}
    #search_sql_term{
        where = [
            <<"rsc.is_unfindable = ">>, '$1'
        ],
        args = [
            z_convert:to_bool(Boolean)
        ]
    };
qterm({upcoming, Boolean}, _Context) ->
    %% upcoming: items whose start date lies in the future
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"rsc.pivot_date_start >= current_timestamp">>
                ]
            };
        false ->
            []
    end;
qterm({ongoing, Boolean}, _Context) ->
    %% ongoing: items whose date range is around the current date
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"rsc.pivot_date_start <= current_timestamp ",
                      "and rsc.pivot_date_end >= current_timestamp">>
                ]
            };
        false ->
            []
    end;
qterm({finished, Boolean}, _Context) ->
    %% finished: items whose end date lies in the past
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"rsc.pivot_date_end < current_timestamp">>
                ]
            };
        false ->
            []
    end;
qterm({unfinished, Boolean}, _Context) ->
    %% unfinished: items whose end date lies in the future
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"rsc.pivot_date_end >= current_timestamp">>
                ]
            };
        false ->
            []
    end;
qterm({unfinished_or_nodate, Boolean}, _Context) ->
    %% unfinished_or_nodate: end date in the future or no date at all
    case z_convert:to_bool(Boolean) of
        true ->
            #search_sql_term{
                where = [
                    <<"(rsc.pivot_date_end >= current_date "
                      "or rsc.pivot_date_start is null)">>
                ]
            };
        false ->
            []
    end;
qterm({is_authoritative, Boolean}, _Context) ->
    %% authoritative={true|false}
    #search_sql_term{
        where = [
            <<"rsc.is_authoritative = ">>, '$1'
        ],
        args = [
            z_convert:to_bool(Boolean)
        ]
    };
qterm({creator_id, Id}, Context) ->
    %% creator_id=<rsc id>
    #search_sql_term{
        where = [
            <<"rsc.creator_id = ">>, '$1'
        ],
        args = [
            m_rsc:rid(Id, Context)
        ]
    };
qterm({modifier_id, Id}, Context) ->
    %% modifier_id=<rsc id>
    #search_sql_term{
        where = [
            <<"rsc.modifier_id = ">>, '$1'
        ],
        args = [
            m_rsc:rid(Id, Context)
        ]
    };
qterm({qargs, Boolean}, Context) ->
    %% qargs: add all query terms from the current HTTP request arguments
    case z_convert:to_bool(Boolean) of
        true ->
            Terms = parse_request_args(qargs(Context)),
            qterm(Terms, Context);
        false ->
            []
    end;
qterm({query_id, Id}, Context) ->
    %% query_id=<rsc id>
    %% Get the query terms from given resource ID, and use those terms.
    QArgs = try
                parse_query_text(z_html:unescape(m_rsc:p(Id, 'query', Context)))
            catch
                throw:{error,{unknown_query_term,Term}}:S ->
                    ?LOG_ERROR(#{
                        text => <<"Unknown query term in search query">>,
                        in => zotonic_mod_search,
                        result => error,
                        reason => unknown_query_term,
                        query_id => Id,
                        term => Term,
                        stack => S
                    }),
                    []
            end,
    qterm(QArgs, Context);
qterm({rsc_id, Id}, Context) ->
    %% rsc_id=<rsc id>
    %% Filter to *only* include the given rsc id. Can be used for resource
    %% existence checks.
    #search_sql_term{
        where = [
            <<"rsc.id = ">>, '$1'
        ],
        args = [
            m_rsc:rid(Id, Context)
        ]
    };
qterm({name, Name}, Context) ->
    %% name=<name-pattern>
    %% Filter on the unique name of a resource; '*' acts as wildcard.
    case z_string:to_lower(mod_search:trim(z_convert:to_binary(Name), Context)) of
        All when All =:= <<>>; All =:= <<"*">>; All =:= <<"%">> ->
            #search_sql_term{
                where = [
                    <<"rsc.name is not null">>
                ]
            };
        Name1 ->
            Name2 = binary:replace(Name1, <<"*">>, <<"%">>, [global]),
            #search_sql_term{
                where = [
                    <<"rsc.name like ">>, '$1'
                ],
                args = [
                    Name2
                ]
            }
    end;
qterm({language, []}, _Context) ->
    [];
qterm({language, [ Lang | _ ] = Langs}, Context) when is_list(Lang) ->
    % A list of language lists: handle each one separately.
    lists:map(
        fun(Code) ->
            qterm({language, Code}, Context)
        end,
        Langs);
qterm({language, [ Lang | _ ] = Langs}, _Context) when is_atom(Lang); is_binary(Lang) ->
    % language=<iso-code> list: match resources having any of the languages.
    Langs1 = lists:map(
        fun(Lng) ->
            case z_language:to_language_atom(Lng) of
                {ok, Code} ->
                    z_convert:to_binary(Code);
                {error, _} ->
                    % Unknown iso code: use a never-matching sentinel.
                    <<"x-none">>
            end
        end,
        Langs),
    #search_sql_term{
        where = [
            <<"rsc.language && ">>, '$1'
        ],
        args = [ Langs1 ]
    };
qterm({language, Lang}, _Context) ->
    % language=<iso-code>: filter on the presence of a translation.
    case z_language:to_language_atom(Lang) of
        {ok, Code} ->
            #search_sql_term{
                where = [
                    <<"rsc.language @> ">>, '$1'
                ],
                args = [
                    [ z_convert:to_binary(Code) ]
                ]
            };
        {error, _} ->
            % Unknown iso code, ignore
            []
    end;
qterm({sort, Sort}, _Context) ->
    %% sort=fieldname; a leading '-' reverses the ordering.
    sort_term(Sort);
qterm({asort, Sort}, _Context) ->
    asort_term(Sort);
qterm({zsort, Sort}, _Context) ->
    zsort_term(Sort);
qterm({{facet, Field}, <<"[", _/binary>> = V}, Context) ->
    % A "[...]"-encoded list value: split before handling.
    V1 = maybe_split_list(V),
    qterm({{facet, Field}, V1}, Context);
qterm({{facet, Field}, V}, Context) ->
    %% facet.foo=value
    %% Add a join with the search_facet table.
    case search_facet:qterm(sql_safe(Field), V, Context) of
        {ok, Res1} ->
            Res1;
        {error, _} ->
            none
    end;
qterm({filter, R}, Context) ->
    add_filters(R, Context);
qterm({{filter, Field}, V}, Context) ->
    {Tab, Alias, Col, Q1} = map_filter_column(Field, #search_sql_term{}),
    case pivot_qterm(Tab, Alias, Col, V, Q1, Context) of
        {ok, QTerm} ->
            QTerm;
        {error, _} ->
            none
    end;
qterm({text, Text}, Context) ->
    %% text=...
    %% Perform a fulltext search; "id:<n>" is a direct id lookup.
    case mod_search:trim(z_convert:to_binary(Text), Context) of
        <<>> ->
            [];
        <<"id:", S/binary>> ->
            #search_sql_term{
                where = [
                    <<"rsc.id = ">>, '$1'
                ],
                args = [
                    m_rsc:rid(S, Context)
                ]
            };
        _ ->
            TsQuery = mod_search:to_tsquery(Text, Context),
            #search_sql_term{
                where = [
                    '$1', <<"@@ rsc.pivot_tsv">>
                ],
                sort = [
                    [
                        "ts_rank_cd(", mod_search:rank_weight(Context),
                        ", rsc.pivot_tsv, ", '$1', ", ", '$2', ") desc"
                    ]
                ],
                args = [
                    TsQuery,
                    mod_search:rank_behaviour(Context)
                ]
            }
    end;
qterm({match_objects, RId}, Context) ->
    %% match_objects=<id>
    %% Match on the objects of the resource, best matching return first.
    case m_rsc:rid(RId, Context) of
        undefined ->
            none;
        Id ->
            ObjectIds = m_edge:objects(Id, Context),
            qterm([
                    {match_object_ids, ObjectIds},
                    {id_exclude, Id}
                ], Context)
    end;
qterm({match_object_ids, ObjectIds}, Context) ->
    ObjectIds1 = [ m_rsc:rid(OId, Context) || OId <- lists:flatten(ObjectIds) ],
    MatchTerms = [ ["zpo",integer_to_list(ObjId)] || ObjId <- ObjectIds1, is_integer(ObjId) ],
    TsQuery = lists:flatten(lists:join("|", MatchTerms)),
    case TsQuery of
        [] ->
            none;
        _ ->
            #search_sql_term{
                tables = #{
                    <<"matchquery">> => [ <<"to_tsquery(">>, '$1', <<")">> ]
                },
                where = [
                    <<"matchquery @@ rsc.pivot_rtsv">>
                ],
                sort = [
                    <<"ts_rank(rsc.pivot_rtsv, matchquery) desc">>
                ],
                args = [
                    TsQuery
                ]
            }
    end;
qterm({date_start_after, Date}, Context) ->
    %% date_start_after=date
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_start >= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_start_before, Date}, Context) ->
    %% date_start_before=date
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_start <= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_start_year, Year}, _Context) ->
    %% date_start_year=year
    #search_sql_term{
        where = [
            <<"date_part('year', rsc.pivot_date_start) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Year)
        ]
    };
qterm({date_end_after, Date}, Context) ->
    %% date_end_after=date
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_end >= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_end_before, Date}, Context) ->
    %% date_end_before=date
    #search_sql_term{
        where = [
            <<"rsc.pivot_date_end <= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({date_end_year, Year}, _Context) ->
    %% date_end_year=year
    %% Filter on year of end date
    #search_sql_term{
        where = [
            <<"date_part('year', rsc.pivot_date_end) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Year)
        ]
    };
qterm({publication_year, Year}, _Context) ->
    %% publication_year=year
    %% Filter on year of publication
    #search_sql_term{
        where = [
            <<"date_part('year', rsc.publication_start) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Year)
        ]
    };
qterm({publication_month, Month}, _Context) ->
    %% publication_month=month
    %% Filter on month of publication
    #search_sql_term{
        where = [
            <<"date_part('month', rsc.publication_start) = ">>, '$1'
        ],
        args = [
            z_convert:to_integer(Month)
        ]
    };
qterm({publication_after, Date}, Context) ->
    #search_sql_term{
        where = [
            <<"rsc.publication_start >= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({publication_before, Date}, Context) ->
    #search_sql_term{
        where = [
            <<"rsc.publication_start <= ">>, '$1'
        ],
        args = [
            z_datetime:to_datetime(Date, Context)
        ]
    };
qterm({{custom, Term}, Arg}, Context) ->
    % Unknown term: give modules a chance to handle it via a notification.
    case z_notifier:first(#search_query_term{ term = Term, arg = Arg }, Context) of
        undefined ->
            ?LOG_WARNING(#{
                in => zotonic_mod_search,
                text => <<"Ignored unknown query search term">>,
                term => Term,
                arg => Arg,
                result => error,
                reason => unknown_query_term
            }),
            [];
        [] ->
            [];
        #search_sql_term{} = SQL ->
            SQL
    end;
qterm(Term, _Context) ->
    % No match found
    throw({error, {unknown_query_term, Term}}).
%% @doc Parse hassubject and hasobject edges. Accepts a bare id, an
%% [Id, Predicate] pair, or [Id, Predicate, JoinAlias] where JoinAlias is
%% the table alias (default "rsc") the edge is joined against.
-spec parse_edges(hassubject | hasobject, list(), z:context()) -> #search_sql_term{}.
parse_edges(Term, [[Id, Predicate]], Context) ->
    parse_edges(Term, [[Id, Predicate, "rsc"]], Context);
parse_edges(hassubject, [[Id, Predicate, JoinAlias]], Context) ->
    Alias = edge_alias(),
    JoinAlias1 = sql_safe(JoinAlias),
    #search_sql_term{
        tables = #{
            Alias => <<"edge">>
        },
        where = [
            Alias, <<".object_id = ">>, JoinAlias1, <<".id">>,
            <<" and ">>, Alias, <<".subject_id = ">>, '$1',
            <<" and ">>, Alias, <<".predicate_id = ">>, '$2'
        ],
        args = [
            m_rsc:rid(Id, Context),
            predicate_to_id(Predicate, Context)
        ]
    };
parse_edges(hassubject, [Id], Context) ->
    Alias = edge_alias(),
    #search_sql_term{
        tables = #{
            Alias => <<"edge">>
        },
        where = [
            Alias, <<".object_id = rsc.id">>,
            <<" and ">>, Alias, <<".subject_id = ">>,
            '$1'
        ],
        args = [
            m_rsc:rid(Id, Context)
        ]
    };
parse_edges(hasobject, [[Id, Predicate, JoinAlias]], Context) ->
    Alias = edge_alias(),
    JoinAlias1 = sql_safe(JoinAlias),
    #search_sql_term{
        tables = #{
            Alias => <<"edge">>
        },
        where = [
            Alias, <<".subject_id = ">>, JoinAlias1, <<".id">>,
            <<" and ">>, Alias, <<".object_id = ">>, '$1',
            <<" and ">>, Alias, <<".predicate_id = ">>, '$2'
        ],
        args = [
            m_rsc:rid(Id, Context),
            predicate_to_id(Predicate, Context)
        ]
    };
parse_edges(hasobject, [Id], Context) ->
    Alias = edge_alias(),
    #search_sql_term{
        tables = #{
            Alias => <<"edge">>
        },
        where = [
            Alias, <<".subject_id = rsc.id">>,
            <<" and ">>, Alias, <<".object_id = ">>,
            '$1'
        ],
        args = [
            m_rsc:rid(Id, Context)
        ]
    }.
%% @doc Generate a unique table alias for an edge join, so that multiple edge
%% terms in the same query do not clash.
edge_alias() ->
    Nr = z_ids:identifier(6),
    <<"edge_", Nr/binary>>.
%% Add a join on the hierarchy table. (Dead code, kept for reference.)
%% add_hierarchy_join(HierarchyName, Lft, Rght, Search) ->
%%     {NameArg, Search1} = add_arg(HierarchyName, Search),
%%     {LftArg, Search2} = add_arg(Lft, Search1),
%%     {RghtArg, Search3} = add_arg(Rght, Search2),
%% @doc Wrap a sort expression as a z-layer sort term.
%% NOTE(review): only the 'sort' field of the intermediate term is copied;
%% any join that add_order/2 adds for pivot./facet. sorts is dropped here —
%% confirm whether zsort is meant to support those prefixes.
zsort_term(Sort) ->
    T = add_order(Sort, #search_sql_term{}),
    #search_sql_term{
        zsort = T#search_sql_term.sort
    }.
%% @doc Wrap a sort expression as an a-layer sort term.
%% NOTE(review): like zsort_term/1 this copies only the 'sort' field and
%% drops any join added by add_order/2 — verify for pivot./facet. sorts.
asort_term(Sort) ->
    T = add_order(Sort, #search_sql_term{}),
    #search_sql_term{
        asort = T#search_sql_term.sort
    }.
%% @doc Wrap a sort expression as a normal sort term.
sort_term(Sort) ->
    add_order(Sort, #search_sql_term{}).
%% @doc Add an ORDER clause. The sort spec is normalized to a binary with a
%% leading '+' (ascending) or '-' (descending); special prefixes select
%% random ordering, edge columns, pivot tables and facet columns.
add_order(<<>>, Search) ->
    Search;
% A list of sort specs — unless its head is an integer, in which case the
% "list" is actually an Erlang string and is handled by the is_list clause.
add_order([ Order | Os ], Search) when not is_integer(Order) ->
    Search1 = add_order(Order, Search),
    add_order(Os, Search1);
add_order(Order, Search) when is_atom(Order) ->
    add_order(atom_to_binary(Order, utf8), Search);
add_order(Order, Search) when is_list(Order) ->
    add_order(list_to_binary(Order), Search);
% "random" (after '+'/'-' normalization; the direction byte is ignored).
add_order(<<_, "random">>, Search) ->
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ <<"random()">> ]
    };
% "seq": order by edge sequence number, then edge id as tie breaker.
add_order(<<C, "seq">>, Search) when C =:= $-; C =:= $+ ->
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ {edge, C, <<"seq">>}, {edge, C, <<"id">>} ]
    };
% "edge.<column>": order by an arbitrary (sql-safe) edge column.
add_order(<<C, "edge.", Column/binary>>, Search) when C =:= $-; C =:= $+ ->
    Column1 = sql_safe(Column),
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ {edge, C, Column1} ]
    };
% "pivot.<table>.<column>" joins the custom pivot table;
% "pivot.<column>" orders on a rsc.pivot_<column>.
add_order(<<C, "pivot.", Pivot/binary>>, Search) when C =:= $-; C =:= $+ ->
    case binary:split(Pivot, <<".">>) of
        [ PivotTable, Column ] ->
            Tab1 = sql_safe(<<"pivot_", PivotTable/binary>>),
            Col1 = sql_safe(Column),
            Join = Search#search_sql_term.join_inner,
            Search#search_sql_term{
                join_inner = Join#{
                    Tab1 => {Tab1, <<Tab1/binary, ".id = rsc.id">>}
                },
                sort = Search#search_sql_term.sort
                    ++ [ {Tab1, C, Col1} ]
            };
        [ Column ] ->
            Col1 = <<"pivot_", Column/binary>>,
            Col2 = sql_safe(Col1),
            Search#search_sql_term{
                sort = Search#search_sql_term.sort
                    ++ [ {<<"rsc">>, C, Col2} ]
            }
    end;
% "facet.<column>" joins the search_facet table and orders on f_<column>.
add_order(<<C, "facet.", Column/binary>>, Search) when C =:= $-; C =:= $+ ->
    Col1 = <<"f_", Column/binary>>,
    Col2 = sql_safe(Col1),
    Join = Search#search_sql_term.join_inner,
    Search#search_sql_term{
        join_inner = Join#{
            <<"facet">> => {<<"search_facet">>, <<"facet.id = rsc.id">>}
        },
        sort = Search#search_sql_term.sort
            ++ [ {<<"facet">>, C, Col2} ]
    };
% Plain "<column>" or "<alias>.<column>"; alias defaults to "rsc".
add_order(<<C, Sort/binary>>, Search) when C =:= $-; C =:= $+ ->
    Sort1 = sql_safe(Sort),
    [ Alias, Column ] = case binary:split(Sort1, <<".">>) of
        [ Col ] ->
            [ <<"rsc">>, Col ];
        [ _, _ ] = AC ->
            AC
    end,
    Search#search_sql_term{
        sort = Search#search_sql_term.sort
            ++ [ {Alias, C, Column} ]
    };
% No direction prefix: default to ascending.
add_order(Sort, Search) ->
    add_order(<<"+", Sort/binary>>, Search).
%% @doc Make sure an expression is safe to splice into the search query:
%% it may only contain [0-9a-zA-Z_.]. Throws {error, {unsafe_expression, _}}
%% otherwise. Non-string input is first converted to a binary.
sql_safe(Expr) when is_list(Expr); is_binary(Expr) ->
    case re:run(Expr, ?SQL_SAFE_REGEXP) of
        {match, _} -> Expr;
        nomatch -> throw({error, {unsafe_expression, Expr}})
    end;
sql_safe(Other) ->
    sql_safe(z_convert:to_binary(Other)).
%% @doc Make sure the input is a list of valid category names. Unknown
%% categories are replaced by '$error' (which never matches), empty entries
%% are dropped. Note: the foldl prepends, so the result is in reverse input
%% order — category order is not significant for the generated query.
assure_categories(Name, Context) ->
    Cats = assure_cats_list(Name),
    Cats1 = assure_cat_flatten(Cats),
    lists:foldl(fun(C, Acc) ->
                    case assure_category(C, Context) of
                        undefined -> Acc;
                        error -> ['$error'|Acc];
                        {ok, N} -> [N|Acc]
                    end
                end,
                [],
                Cats1).
%% @doc Make a single category a list; pass lists through unchanged.
assure_cats_list(NameOrNames) ->
    case is_list(NameOrNames) of
        true -> NameOrNames;
        false -> [ NameOrNames ]
    end.
%% @doc Split binaries with comma separated lists of categories and flatten
%% the result into a single list of category names.
-spec assure_cat_flatten(any() | list()) -> list().
assure_cat_flatten(Names) ->
    Expanded = [
        case Name of
            Bin when is_binary(Bin) -> binary:split(Bin, <<",">>, [ global ]);
            Other -> Other
        end
        || Name <- Names
    ],
    lists:flatten(Expanded).
%% @doc Make sure the given name is a category; strips surrounding quotes and
%% resolves ids to category names. Returns undefined (skip), error (never
%% match) or {ok, CategoryName}.
assure_category(undefined, _) -> undefined;
assure_category(null, _) -> undefined;
assure_category("", _) -> undefined;
assure_category("*", _) -> undefined;
assure_category(<<>>, _) -> undefined;
assure_category(<<"*">>, _) -> undefined;
assure_category(<<$', _/binary>> = Name, Context) ->
    case binary:last(Name) of
        $' -> assure_category_1(z_string:trim(Name, $'), Context);
        _ -> assure_category_1(Name, Context)
    end;
assure_category(<<$", _/binary>> = Name, Context) ->
    case binary:last(Name) of
        $" -> assure_category_1(z_string:trim(Name, $"), Context);
        _ -> assure_category_1(Name, Context)
    end;
assure_category(Name, Context) ->
    assure_category_1(Name, Context).

assure_category_1(Name, Context) ->
    case m_category:name_to_id(Name, Context) of
        {ok, _Id} ->
            {ok, Name};
        _ ->
            % Not a category name; maybe it is a resource id of a category.
            case m_rsc:rid(Name, Context) of
                undefined ->
                    ?LOG_NOTICE(#{
                        text => <<"Query: unknown category">>,
                        in => zotonic_mod_search,
                        name => Name
                    }),
                    %% display_error([?__("Unknown category", Context), 32, $", z_html:escape(z_convert:to_binary(Name)), $"], Context),
                    error;
                CatId ->
                    case m_category:id_to_name(CatId, Context) of
                        undefined ->
                            ?LOG_NOTICE(#{
                                text => <<"Query: term is not a category">>,
                                in => zotonic_mod_search,
                                name => Name
                            }),
                            %% display_error([$", z_html:escape(z_convert:to_binary(Name)), $", 32, ?__("is not a category", Context)], Context),
                            error;
                        Name1 ->
                            {ok, Name1}
                    end
            end
    end.
%% If the current user is an administrator or editor, show an error message
%% about this search. (Dead code fragment of display_error/2, kept for reference.)
%% case z_acl:is_allowed(use, mod_admin, Context) of
%%     true ->
%%         ContextPruned = z_context:prune_for_async(Context),
%%         z_session_page:add_script(z_render:growl_error(Msg, ContextPruned));
%% @doc Add a pivot-column condition for one value or a list of values.
%% A list of values is combined into a single OR'ed where clause.
%% NOTE(review): the OR wrap rebuilds Q2's complete 'where' list, so any
%% pre-existing where conditions in the incoming Q would be OR'ed in as
%% well — callers currently pass a fresh term from map_filter_column/2.
-spec pivot_qterm(Table, Alias, Column, Value, Q, Context) -> {ok, QResult} | {error, term()}
    when Table :: binary(),
         Alias :: binary(),
         Column :: binary(),
         Value :: term(),
         Q :: #search_sql_term{},
         QResult :: #search_sql_term{},
         Context :: z:context().
pivot_qterm(_Tab, _Alias, _Col, [], Q, _Context) ->
    {ok, Q};
pivot_qterm(Tab, Alias, Col, [Value], Q, Context) ->
    pivot_qterm_1(Tab, Alias, Col, Value, Q, Context);
pivot_qterm(Tab, Alias, Col, Vs, Q, Context) when is_list(Vs) ->
    % 'OR' query for all values; values that fail conversion are skipped.
    Q2 = lists:foldl(
        fun(V, QAcc) ->
            case pivot_qterm_1(Tab, Alias, Col, V, QAcc, Context) of
                {ok, QAcc1} ->
                    QAcc1;
                {error, _} ->
                    QAcc
            end
        end,
        Q,
        Vs),
    Q3 = Q2#search_sql_term{
        where = [
            <<"(">>,
            lists:join(<<" OR ">>, Q2#search_sql_term.where),
            <<")">>
        ]
    },
    {ok, Q3};
pivot_qterm(Tab, Alias, Col, Value, Q, Context) ->
    pivot_qterm_1(Tab, Alias, Col, Value, Q, Context).
%% @doc Add a single "alias.column <op> $N" condition. The optional operator
%% prefix ("<=", ">", ...) is split off the value; the remainder is converted
%% to the column's database type. Conversion failures drop the term.
pivot_qterm_1(Tab, Alias, Col, Value, Query, Context) ->
    {Op, Value1} = extract_op(Value),
    case z_db:to_column_value(Tab, Col, Value1, Context) of
        {ok, Value2} ->
            {ArgN, Query2} = add_term_arg(Value2, Query),
            W = [
                <<Alias/binary, $., Col/binary>>, Op, ArgN
            ],
            Query3 = Query2#search_sql_term{
                where = Query2#search_sql_term.where ++ [ W ]
            },
            {ok, Query3};
        {error, Reason} = Error ->
            ?LOG_WARNING(#{
                text => <<"Pivot value error, dropping query term.">>,
                in => zotonic_mod_search,
                result => error,
                reason => Reason,
                table => Tab,
                alias => Alias,
                column => Col,
                value => Value1
            }),
            Error
    end.
%% @doc Append a SQL argument value and return its positional placeholder
%% ('$1', '$2', ...) as an atom. The created atoms are bounded by the number
%% of query arguments, so the dynamic list_to_atom/1 is safe in practice.
%% NOTE(review): duplicate of add_filter_arg/2 below.
add_term_arg(ArgValue, #search_sql_term{ args = Args } = Q) ->
    Arg = [$$] ++ integer_to_list(length(Args) + 1),
    {list_to_atom(Arg), Q#search_sql_term{args = Args ++ [ ArgValue ]}}.
%% @doc Split an optional comparison-operator prefix off a query term value,
%% returning {SqlOperator, Rest}. Two-character operators MUST be matched
%% before their one-character prefixes: with the previous clause order,
%% <<"<=10">> matched the "<" clause and parsed as {"<", <<"=10">>}, making
%% the "<=", ">=" and "<>" clauses unreachable.
extract_op(<<"<=", V/binary>>) ->
    {"<=", V};
extract_op(<<">=", V/binary>>) ->
    {">=", V};
extract_op(<<"<>", V/binary>>) ->
    {"<>", V};
extract_op(<<"!=", V/binary>>) ->
    %% "!=" is normalized to the SQL spelling "<>".
    {"<>", V};
extract_op(<<"=", V/binary>>) ->
    {"=", V};
extract_op(<<">", V/binary>>) ->
    {">", V};
extract_op(<<"<", V/binary>>) ->
    {"<", V};
extract_op(V) ->
    %% No operator prefix: default to equality.
    {"=", V}.
%% @doc Build a #search_sql_term for a "filter" query argument. Accepts a
%% textual list ("[...]"), a list of [Column, Op, Value] triples (OR-ed), an
%% explicit {'or', Filters} tuple, or a single [Column, (Op,) Value] filter.
add_filters(Filters, Context) ->
    add_filters(Filters, #search_sql_term{}, Context).

%% Textual form: parse the "[...]" list first, then dispatch again.
add_filters(<<"[", _/binary>> = Filter, Q, Context) ->
    add_filters(maybe_split_list(Filter), Q, Context);
%% A list whose first element is itself a column spec: several filters, OR-ed.
add_filters([ [Column|_] | _ ] = Filters, Q, Context)
    when is_list(Column);
         is_binary(Column);
         is_atom(Column) ->
    add_filters_or(Filters, Q, Context);
add_filters({'or', Filters}, Q, Context) ->
    add_filters_or(Filters, Q, Context);
%% [Column, Value] defaults the operator to 'eq'.
add_filters([Column, Value], R, Context) ->
    add_filters([Column, eq, Value], R, Context);
add_filters([Column, Operator, Value], Q, Context) ->
    {Tab, Alias, Col, Q1} = map_filter_column(Column, Q),
    case z_db:to_column_value(Tab, Col, Value, Context) of
        {ok, V1} ->
            {Expr, Q2} = create_filter(Tab, Alias, Col, Operator, V1, Q1),
            add_filter_where(Expr, Q2);
        {error, Reason} ->
            %% Value does not fit the column type: log and drop the filter.
            ?LOG_INFO(#{
                text => <<"Search query filter could not be added">>,
                result => error,
                reason => Reason,
                filter_column => Column,
                table => Tab,
                column => Col,
                value => Value
            }),
            Q
    end.
%% Combine several column filters into one parenthesized, OR-joined
%% WHERE expression; filters with unconvertible values are skipped.
add_filters_or(Filters, Q, Context) ->
    Collect = fun(Filter, Acc) -> add_filters_or_1(Filter, Acc, Context) end,
    {Exprs, Q1} = lists:foldr(Collect, {[], Q}, Filters),
    add_filter_where([ "(", lists:join(<<" or ">>, Exprs), ")" ], Q1).
%% Map one [Column, Op, Value] triple (a [Column, Value] pair defaults the
%% operator to 'eq') onto a filter expression, accumulating both the
%% expressions and the growing query term. Unconvertible values are skipped.
add_filters_or_1([ Col, Val ], Acc, Context) ->
    add_filters_or_1([ Col, eq, Val ], Acc, Context);
add_filters_or_1([ Col, Op, Val ], {Exprs, Q}, Context) ->
    {Tab, Alias, DbCol, Q1} = map_filter_column(Col, Q),
    case z_db:to_column_value(Tab, DbCol, Val, Context) of
        {ok, Mapped} ->
            {Expr, Q2} = create_filter(Tab, Alias, DbCol, Op, Mapped, Q1),
            {[Expr | Exprs], Q2};
        {error, _} ->
            {Exprs, Q}
    end.
%% Build a single "alias.col OP $n" filter expression. NULL-ish values
%% (the atoms 'null' and 'undefined') become IS (NOT) NULL checks instead
%% of positional arguments.
create_filter(Tab, Alias, Col, Operator, null, Q) ->
    %% Normalize the SQL-ish 'null' atom to 'undefined'.
    create_filter(Tab, Alias, Col, Operator, undefined, Q);
create_filter(_Tab, Alias, Col, Operator, undefined, Q) ->
    {create_filter_null(Alias, Col, map_filter_operator(Operator)), Q};
create_filter(_Tab, Alias, Col, Operator, Value, Q) ->
    {Placeholder, Q1} = add_filter_arg(Value, Q),
    SqlOp = map_filter_operator(Operator),
    {[Alias, $., Col, <<" ">>, SqlOp, <<" ">>, Placeholder], Q1}.
%% @doc Resolve a user-supplied filter column to {Table, Alias, Column, Q'},
%% adding the inner join needed for pivot tables or the facet table.
%% Column and table names are passed through sql_safe/1 before use.
map_filter_column(<<"pivot.", P/binary>>, #search_sql_term{ join_inner = Join } = Q) ->
    case binary:split(P, <<".">>) of
        [ Table, Field ] ->
            %% "pivot.tab.field": join the custom pivot table pivot_<tab>.
            T1 = sql_safe(Table),
            T2 = <<"pivot_", T1/binary>>,
            F1 = sql_safe(Field),
            Q1 = Q#search_sql_term{
                join_inner = Join#{
                    T2 => {T2, <<T2/binary, ".id = rsc.id">>}
                }
            },
            {T2, T2, F1, Q1};
        [ Field ] ->
            %% "pivot.field": a pivot_* column on the rsc table itself.
            F1 = z_convert:to_binary(sql_safe(Field)),
            {<<"rsc">>, <<"rsc">>, <<"pivot_", F1/binary>>, Q}
    end;
map_filter_column(<<"facet.", P/binary>>, #search_sql_term{ join_inner = Join } = Q) ->
    %% "facet.field": join search_facet (aliased "facet"), columns are f_*.
    Q1 = Q#search_sql_term{
        join_inner = Join#{
            <<"facet">> => {<<"search_facet">>, <<"facet.id = rsc.id">>}
        }
    },
    Field = sql_safe(P),
    {<<"search_facet">>, <<"facet">>, <<"f_", Field/binary>>, Q1};
map_filter_column(Column, Q) ->
    %% Plain column on the rsc table.
    Field = sql_safe(Column),
    {<<"rsc">>, <<"rsc">>, Field, Q}.
%% Add an AND clause to the WHERE of a #search_sql_term
%% AND a clause into the accumulated WHERE; an empty WHERE is replaced
%% by the clause itself.
add_filter_where(Clause, #search_sql_term{ where = Where } = Q) ->
    case Where of
        [] -> Q#search_sql_term{ where = Clause };
        _ -> Q#search_sql_term{ where = [ Where, <<" and ">>, Clause ] }
    end.
%% Append an argument to a #search_sql_term
%% @doc Append a SQL argument value and return its positional placeholder
%% ('$1', '$2', ...) as an atom. Atom creation is bounded by the argument
%% count. NOTE(review): duplicate of add_term_arg/2 above.
add_filter_arg(ArgValue, #search_sql_term{ args = Args } = Q) ->
    Arg = [$$] ++ integer_to_list(length(Args) + 1),
    {list_to_atom(Arg), Q#search_sql_term{args = Args ++ [ ArgValue ]}}.
%% Render a comparison against NULL: "=" becomes IS NULL, "<>" becomes
%% IS NOT NULL; any other operator can never hold against NULL and
%% collapses to the literal "false". (Third clause's first argument is the
%% alias, not a table - renamed from the misleading _Tab.)
create_filter_null(Alias, Col, "=") ->
    [ Alias, $., Col, <<" is null">> ];
create_filter_null(Alias, Col, "<>") ->
    [ Alias, $., Col, <<" is not null">> ];
create_filter_null(_Alias, _Col, _Op) ->
    "false".
%% Normalize a filter operator - given as an atom, a string, or a binary -
%% to its SQL spelling. Clauses are grouped per operator; all patterns are
%% disjoint, so the regrouping does not change dispatch. Unknown operators
%% throw {error, {unknown_filter_operator, Op}}.
map_filter_operator(eq) -> "=";
map_filter_operator('=') -> "=";
map_filter_operator("=") -> "=";
map_filter_operator(<<"=">>) -> "=";
map_filter_operator(ne) -> "<>";
map_filter_operator('<>') -> "<>";
map_filter_operator("<>") -> "<>";
map_filter_operator(<<"<>">>) -> "<>";
map_filter_operator(gt) -> ">";
map_filter_operator('>') -> ">";
map_filter_operator(">") -> ">";
map_filter_operator(<<">">>) -> ">";
map_filter_operator(lt) -> "<";
map_filter_operator('<') -> "<";
map_filter_operator("<") -> "<";
map_filter_operator(<<"<">>) -> "<";
map_filter_operator(gte) -> ">=";
map_filter_operator('>=') -> ">=";
map_filter_operator(">=") -> ">=";
map_filter_operator(<<">=">>) -> ">=";
map_filter_operator(lte) -> "<=";
map_filter_operator('<=') -> "<=";
map_filter_operator("<=") -> "<=";
map_filter_operator(<<"<=">>) -> "<=";
map_filter_operator(Op) -> throw({error, {unknown_filter_operator, Op}}).
%% @doc Parse a "[a,b,...]" textual list (binary or string) into a list of
%% unquoted terms; anything else is wrapped in a single-element list.
maybe_split_list(<<"[", _/binary>> = Term) ->
    unquote_all(search_parse_list:parse(Term));
maybe_split_list("[" ++ _ = Term) ->
    unquote_all(search_parse_list:parse(Term));
maybe_split_list(Other) ->
    [ Other ].
%% Recursively strip quote characters from every (trimmed) binary in a
%% possibly nested list; non-binary, non-list terms pass through untouched.
unquote_all(List) when is_list(List) ->
    [ unquote_all(Item) || Item <- List ];
unquote_all(Bin) when is_binary(Bin) ->
    unquot(z_string:trim(Bin));
unquote_all(Other) ->
    Other.
%% @doc Strip a leading quote character (', " or `) and remove every further
%% occurrence of that character from the rest of the term. Without [global],
%% binary:replace/3 removed only the FIRST occurrence, which was inconsistent
%% with the list clause below (which filters out every occurrence).
unquot(<<C, Rest/binary>>) when C =:= $'; C =:= $"; C =:= $` ->
    binary:replace(Rest, <<C>>, <<>>, [global]);
unquot([C|Rest]) when C =:= $'; C =:= $"; C =:= $` ->
    [ X || X <- Rest, X =/= C ];
unquot(B) ->
    %% Not quoted: return unchanged.
    B.
%% Expand the argument for hasanyobject, make pairs of {ObjectId, PredicateId}
%% @doc Expand a hasanyobject argument into {ObjectId, PredicateId} pairs;
%% a binary is first parsed as a textual list.
expand_object_predicates(Bin, Context) when is_binary(Bin) ->
    map_rids(search_parse_list:parse(Bin), Context);
expand_object_predicates(OPs, Context) ->
    map_rids(OPs, Context).
%% @doc Map a collection of object/predicate specs to id pairs; rsc_list
%% wrappers are unwrapped and empty binaries are dropped.
map_rids({rsc_list, L}, Context) ->
    map_rids(L, Context);
map_rids(L, Context) when is_list(L) ->
    [ map_rid(unquot(X),Context) || X <- L, X =/= <<>> ];
map_rids(Id, Context) ->
    map_rid(Id, Context).
%% Map one object/predicate spec onto a {ObjectId, PredicateId} pair;
%% missing parts default to the wildcard 'any'.
map_rid([], _Context) ->
    {any, any};
map_rid([Obj, Pred | _], Context) ->
    {rid(Obj, Context), rid(Pred, Context)};
map_rid([Obj], Context) ->
    {rid(Obj, Context), any};
map_rid(Obj, Context) ->
    {rid(Obj, Context), any}.
%% Resolve a resource spec to an id. "*" in any representation, as well as
%% the empty string/binary, means the wildcard 'any'; integers pass through;
%% anything else is looked up via m_rsc:rid/2.
rid(undefined, _Context) ->
    undefined;
rid(Wildcard, _Context) when Wildcard =:= <<"*">>; Wildcard =:= '*';
                             Wildcard =:= "*"; Wildcard =:= "";
                             Wildcard =:= <<>> ->
    any;
rid(Id, _Context) when is_integer(Id) ->
    Id;
rid(Id, Context) ->
    m_rsc:rid(Id, Context).
%% Resolve a possibly quoted predicate name to its id. A name starting with
%% a quote character only has its quotes trimmed when it also ends with the
%% same quote character; otherwise it is looked up verbatim.
predicate_to_id([$'|_] = Name, Context) ->
    trim_quoted_and_lookup(Name, $', fun lists:last/1, Context);
predicate_to_id([$"|_] = Name, Context) ->
    trim_quoted_and_lookup(Name, $", fun lists:last/1, Context);
predicate_to_id(<<$', _/binary>> = Name, Context) ->
    trim_quoted_and_lookup(Name, $', fun binary:last/1, Context);
predicate_to_id(<<$", _/binary>> = Name, Context) ->
    trim_quoted_and_lookup(Name, $", fun binary:last/1, Context);
predicate_to_id(Pred, Context) ->
    predicate_to_id_1(Pred, Context).

%% Trim the surrounding quote characters iff the last character (extracted
%% with LastFun, for list or binary names) equals the opening quote.
trim_quoted_and_lookup(Name, Quote, LastFun, Context) ->
    case LastFun(Name) of
        Quote -> predicate_to_id_1(z_string:trim(Name, Quote), Context);
        _ -> predicate_to_id_1(Name, Context)
    end.
%% @doc Resolve a predicate name to its id. Unknown predicates log a notice,
%% show an error to the user, and map to the (nonexistent) id 0.
predicate_to_id_1(Pred, Context) ->
    case m_predicate:name_to_id(Pred, Context) of
        {ok, Id} ->
            Id;
        {error, _} ->
            ?LOG_NOTICE(#{
                text => <<"Query: unknown predicate">>,
                in => zotonic_mod_search,
                predicate => Pred
            }),
            %% Reconstructed from garbled source: growl an error with the
            %% offending predicate quoted (32 is the space character).
            display_error([ ?__("Unknown predicate", Context), 32, $", z_html:escape(z_convert:to_binary(Pred)), $" ], Context),
            0
    end.
%% SQL condition restricting an edge alias to the given object/predicate
%% pair. 'undefined' on either side can never match (literal "false");
%% 'any' acts as a wildcard. The two leading clauses cover the three
%% original undefined-combinations with identical results.
object_predicate_clause(_Edge, undefined, _Predicate) ->
    "false";
object_predicate_clause(_Edge, _Object, undefined) ->
    "false";
object_predicate_clause(Edge, any, any) ->
    [Edge, ".subject_id = rsc.id"];
object_predicate_clause(Edge, any, PredId) when is_integer(PredId) ->
    [Edge, ".predicate_id = ", integer_to_list(PredId)];
object_predicate_clause(Edge, ObjId, any) when is_integer(ObjId) ->
    [Edge, ".object_id = ", integer_to_list(ObjId)];
object_predicate_clause(Edge, ObjId, PredId) when is_integer(ObjId), is_integer(PredId) ->
    [Edge, ".object_id=", integer_to_list(ObjId),
     " and ", Edge, ".predicate_id=", integer_to_list(PredId)].
|
1350b2fa175898d921d6ad21da393d74ac89d8e784042e191cbcc0926ab179e0 | kind2-mc/kind2 | lustreSyntaxChecks.mli | This file is part of the Kind 2 model checker .
Copyright ( c ) 2021 by the Board of Trustees of the University of Iowa
Licensed under the Apache License , Version 2.0 ( the " License " ) ; you
may not use this file except in compliance with the License . You
may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing , software
distributed under the License is distributed on an " AS IS " BASIS ,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or
implied . See the License for the specific language governing
permissions and limitations under the License .
(* This file is part of the Kind 2 model checker.

   Copyright (c) 2021 by the Board of Trustees of the University of Iowa

   Licensed under the Apache License, Version 2.0 (the "License"); you
   may not use this file except in compliance with the License. You
   may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied. See the License for the specific language governing
   permissions and limitations under the License.

*)
(** Check various syntactic properties that do not depend on type information

    @author
    @author Andrew Marmaduke *)
(* All syntax violations the checker can report; each constructor carries
   the offending identifier, expression, or extra detail where applicable. *)
type error_kind = Unknown of string
  | UndefinedLocal of HString.t
  | DuplicateLocal of HString.t
  | DuplicateOutput of HString.t
  | UndefinedNode of HString.t
  | UndefinedContract of HString.t
  | DanglingIdentifier of HString.t
  | QuantifiedVariableInNodeArgument of HString.t * HString.t
  | SymbolicArrayIndexInNodeArgument of HString.t * HString.t
  | NodeCallInFunction of HString.t
  | NodeCallInRefinableContract of string * HString.t
  | IllegalTemporalOperator of string * string
  | IllegalImportOfStatefulContract of HString.t
  | UnsupportedClockedInputOrOutput
  | UnsupportedClockedLocal of HString.t
  | UnsupportedExpression of LustreAst.expr
  | UnsupportedOutsideMerge of LustreAst.expr
  | UnsupportedWhen of LustreAst.expr
  | UnsupportedParametricDeclaration
  | UnsupportedAssignment
  | AssumptionVariablesInContractNode
  | ClockMismatchInMerge
  | MisplacedVarInFrameBlock of LustreAst.ident

(* A syntax error tagged with the source position where it occurs. *)
type error = [
  | `LustreSyntaxChecksError of Lib.position * error_kind
]

(* Human-readable message for an error kind. *)
val error_message : error_kind -> string

(* Run the syntactic checks over a parsed program; on success the AST is
   returned in the Ok case. *)
val syntax_check : LustreAst.t -> (LustreAst.t, [> error]) result

val no_mismatched_clock : bool -> LustreAst.expr -> (unit, [> error]) result
(** Conservative syntactic check of clock arguments for merge expressions.
To eventually be replaced with more general clock inference/checking.
Note: type information is needed for this check, causing this check to
be called in the lustreTypeChecker *)
| null | https://raw.githubusercontent.com/kind2-mc/kind2/d34694b4461323322fdcc291aa3c3d9c453fc098/src/lustre/lustreSyntaxChecks.mli | ocaml | * Conservative syntactic check of clock arguments for merge expressions.
To eventually be replaced with more general clock inference/checking.
Note: type information is needed for this check, causing this check to
be called in the lustreTypeChecker | This file is part of the Kind 2 model checker .
Copyright ( c ) 2021 by the Board of Trustees of the University of Iowa
Licensed under the Apache License , Version 2.0 ( the " License " ) ; you
may not use this file except in compliance with the License . You
may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing , software
distributed under the License is distributed on an " AS IS " BASIS ,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or
implied . See the License for the specific language governing
permissions and limitations under the License .
Copyright (c) 2021 by the Board of Trustees of the University of Iowa
Licensed under the Apache License, Version 2.0 (the "License"); you
may not use this file except in compliance with the License. You
may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
*)
* Check various syntactic properties that do not depend on type information
@author
@author Andrew Marmaduke *)
type error_kind = Unknown of string
| UndefinedLocal of HString.t
| DuplicateLocal of HString.t
| DuplicateOutput of HString.t
| UndefinedNode of HString.t
| UndefinedContract of HString.t
| DanglingIdentifier of HString.t
| QuantifiedVariableInNodeArgument of HString.t * HString.t
| SymbolicArrayIndexInNodeArgument of HString.t * HString.t
| NodeCallInFunction of HString.t
| NodeCallInRefinableContract of string * HString.t
| IllegalTemporalOperator of string * string
| IllegalImportOfStatefulContract of HString.t
| UnsupportedClockedInputOrOutput
| UnsupportedClockedLocal of HString.t
| UnsupportedExpression of LustreAst.expr
| UnsupportedOutsideMerge of LustreAst.expr
| UnsupportedWhen of LustreAst.expr
| UnsupportedParametricDeclaration
| UnsupportedAssignment
| AssumptionVariablesInContractNode
| ClockMismatchInMerge
| MisplacedVarInFrameBlock of LustreAst.ident
type error = [
| `LustreSyntaxChecksError of Lib.position * error_kind
]
val error_message : error_kind -> string
val syntax_check : LustreAst.t -> (LustreAst.t, [> error]) result
val no_mismatched_clock : bool -> LustreAst.expr -> (unit, [> error]) result
|
370a5c280d91e2f89ffa3047b59a765dc5bd94339b48bb1c4ed46a5ce0499aab | cojna/iota | Safe.hs | {-# LANGUAGE BangPatterns #-}
# LANGUAGE CPP #
{-# LANGUAGE Safe #-}
module Data.IntMod.Safe where
import Data.Bits
import Data.Int
import Data.Ratio
#define MOD 1000000007
-- | The common modulus for all operations below (a CPP macro, so it is a
-- compile-time literal).
modulus :: (Num a) => a
modulus = MOD
{-# INLINE modulus #-}  -- restored: pragma was garbled to "# INLINE modulus #"

infixr 8 ^%
infixl 7 *%, /%
infixl 6 +%, -%

-- | Modular addition; operands are assumed to already lie in [0, MOD).
(+%) :: Int64 -> Int64 -> Int64
x +% y = case x + y of
  r
    | r < MOD -> r
    | otherwise -> r - MOD
{-# INLINE (+%) #-}  -- restored: pragma was garbled to "# INLINE ( + % ) #"

-- | Modular subtraction; operands are assumed to already lie in [0, MOD).
(-%) :: Int64 -> Int64 -> Int64
x -% y = case x - y of
  r
    | r < 0 -> r + MOD
    | otherwise -> r
{-# INLINE (-%) #-}

-- | Modular multiplication.
(*%) :: Int64 -> Int64 -> Int64
x *% y = x * y `rem` MOD
{-# INLINE (*%) #-}

{- |
>>> 1 /% 0
0
-}
-- Modular division: multiplies x by the inverse of y obtained via the
-- extended Euclidean algorithm; a non-invertible divisor (e.g. 0) yields 0.
(/%) :: Int64 -> Int64 -> Int64
x /% y = go y MOD 1 0
  where
    go !a !b !u !v
      | b > 0 = case a `quot` b of
          q -> go b (a - q * b) v (u - q * v)
      | otherwise = x * (u + MOD) `rem` MOD
{-# INLINE (/%) #-}

-- Modular exponentiation by repeated squaring; a negative exponent
-- exponentiates the modular inverse of the base.
(^%) :: Int64 -> Int -> Int64
x ^% n
  | n > 0 = go 1 x n
  | n == 0 = 1
  | otherwise = go 1 (1 /% x) (- n)
  where
    go !acc !y !m
      | m .&. 1 == 0 = go acc (y *% y) (unsafeShiftR m 1)
      | m == 1 = acc *% y
      | otherwise = go (acc *% y) (y *% y) (unsafeShiftR (m - 1) 1)
-- | Integers kept in the canonical residue range [0, MOD).
newtype IntMod = IntMod {getIntMod :: Int64} deriving (Eq, Ord)

-- | Embed any integral value, reducing modulo MOD on Integer so negative
-- inputs are handled correctly.
intMod :: (Integral a) => a -> IntMod
intMod x = fromIntegral $ mod (toInteger x) MOD
{-# INLINE intMod #-}  -- restored: pragma was garbled to "# INLINE intMod #"

-- | Representation-invariant check: the wrapped value lies in [0, MOD).
intModValidate :: IntMod -> Bool
intModValidate (IntMod x) = 0 <= x && x < MOD
{-# INLINE intModValidate #-}  -- restored garbled pragma

instance Show IntMod where
  show (IntMod x) = show x

instance Bounded IntMod where
  minBound = IntMod 0
  maxBound = IntMod (MOD - 1)

instance Enum IntMod where
  toEnum = intMod
  fromEnum = fromIntegral

instance Real IntMod where
  toRational (IntMod x) = toRational x

instance Integral IntMod where
  -- Division here is the modular (/), so the "remainder" x - (x/y)*y is 0
  -- whenever y is invertible modulo MOD.
  quotRem x y = (x / y, x - x / y * y)
  toInteger (IntMod x) = toInteger x

instance Num IntMod where
  (IntMod x) + (IntMod y) = IntMod (x +% y)
  (IntMod x) - (IntMod y) = IntMod (x -% y)
  (IntMod x) * (IntMod y) = IntMod (x *% y)
  abs = id
  signum = const (IntMod 1)
  fromInteger x = IntMod . fromInteger $ mod x modulus

instance Fractional IntMod where
  (IntMod x) / (IntMod y) = IntMod (x /% y)
  fromRational q = fromInteger (numerator q) / fromInteger (denominator q)
# LANGUAGE Safe #
# INLINE (-%) #
# INLINE (*%) #
|
>>> 1 /% 0
0
# INLINE (/%) # | # LANGUAGE CPP #
module Data.IntMod.Safe where
import Data.Bits
import Data.Int
import Data.Ratio
#define MOD 1000000007
modulus :: (Num a) => a
modulus = MOD
# INLINE modulus #
infixr 8 ^%
infixl 7 *%, /%
infixl 6 +%, -%
(+%) :: Int64 -> Int64 -> Int64
x +% y = case x + y of
r
| r < MOD -> r
| otherwise -> r - MOD
# INLINE ( + % ) #
(-%) :: Int64 -> Int64 -> Int64
x -% y = case x - y of
r
| r < 0 -> r + MOD
| otherwise -> r
(*%) :: Int64 -> Int64 -> Int64
x *% y = x * y `rem` MOD
(/%) :: Int64 -> Int64 -> Int64
x /% y = go y MOD 1 0
where
go !a !b !u !v
| b > 0 = case a `quot` b of
q -> go b (a - q * b) v (u - q * v)
| otherwise = x * (u + MOD) `rem` MOD
(^%) :: Int64 -> Int -> Int64
x ^% n
| n > 0 = go 1 x n
| n == 0 = 1
| otherwise = go 1 (1 /% x) (- n)
where
go !acc !y !m
| m .&. 1 == 0 = go acc (y *% y) (unsafeShiftR m 1)
| m == 1 = acc *% y
| otherwise = go (acc *% y) (y *% y) (unsafeShiftR (m - 1) 1)
newtype IntMod = IntMod {getIntMod :: Int64} deriving (Eq, Ord)
intMod :: (Integral a) => a -> IntMod
intMod x = fromIntegral $ mod (toInteger x) MOD
# INLINE intMod #
intModValidate :: IntMod -> Bool
intModValidate (IntMod x) = 0 <= x && x < MOD
# INLINE intModValidate #
instance Show IntMod where
show (IntMod x) = show x
instance Bounded IntMod where
minBound = IntMod 0
maxBound = IntMod (MOD - 1)
instance Enum IntMod where
toEnum = intMod
fromEnum = fromIntegral
instance Real IntMod where
toRational (IntMod x) = toRational x
instance Integral IntMod where
quotRem x y = (x / y, x - x / y * y)
toInteger (IntMod x) = toInteger x
instance Num IntMod where
(IntMod x) + (IntMod y) = IntMod (x +% y)
(IntMod x) - (IntMod y) = IntMod (x -% y)
(IntMod x) * (IntMod y) = IntMod (x *% y)
abs = id
signum = const (IntMod 1)
fromInteger x = IntMod . fromInteger $ mod x modulus
instance Fractional IntMod where
(IntMod x) / (IntMod y) = IntMod (x /% y)
fromRational q = fromInteger (numerator q) / fromInteger (denominator q)
|
880b2ad4e4341f8b01efd7495a0af199b15bdceb86c784e85a7e450901fe36c4 | nd/bird | 8.4.4.hs | Complexity of foldr insert empty would be n . log n, while complexity of mkBag is n | null | https://raw.githubusercontent.com/nd/bird/06dba97af7cfb11f558eaeb31a75bd04cacf7201/ch08/8.4.4.hs | haskell | Complexity of foldr insert empty would be n . log n, while complexity of mkBag is n |
|
5b76f5ca59358a5561f93df4cc78b58c42ff165c1c196b71e43f61aa73e8215a | lspitzner/exference | Monoid.hs | module Data.Monoid where
-- Environment signature: class, instances and data declarations are listed
-- without implementations (used as a type environment, not compiled code).

-- | Types with an associative binary operation ('mappend') and an identity
-- element ('mempty').
class Monoid a where
  mempty :: a
  mappend :: a -> a -> a
  mconcat :: [a] -> a

instance Monoid Ordering
instance Monoid ()
instance Monoid Any
instance Monoid All
instance Monoid Event
instance Monoid [a]
instance Monoid a => Monoid (Maybe a)
instance Monoid (Last a)
instance Monoid (First a)
instance Prelude.Num a => Monoid (Product a)
instance Prelude.Num a => Monoid (Sum a)
instance Monoid (Endo a)
instance Monoid a => Monoid (Dual a)
-- instance Monoid b => Monoid (a -> b)
instance (Monoid a, Monoid b) => Monoid (a, b)
-- instance Monoid (Proxy * s)
instance Monoid a => Monoid (Const a b)
instance ( * - > Constraint ) Monoid
instance (Monoid a, Monoid b, Monoid c) => Monoid (a, b, c)
instance (Monoid a, Monoid b, Monoid c, Monoid d) => Monoid (a, b, c, d)
instance (Monoid a, Monoid b, Monoid c, Monoid d, Monoid e) => Monoid (a, b, c, d, e)

-- Newtype wrappers selecting a particular Monoid instance.
data Dual a
data Endo a
data All
data Any
data Sum a
data Product a
data First a
data Last a
data Alt f a
instance Monoid (Proxy * s) | module Data.Monoid where
class Monoid a where
mempty :: a
mappend :: a -> a -> a
mconcat :: [a] -> a
instance Monoid Ordering
instance Monoid ()
instance Monoid Any
instance Monoid All
instance Monoid Event
instance Monoid [a]
instance Monoid a => Monoid (Maybe a)
instance Monoid (Last a)
instance Monoid (First a)
instance Prelude.Num a => Monoid (Product a)
instance Prelude.Num a => Monoid (Sum a)
instance Monoid (Endo a)
instance Monoid a => Monoid (Dual a)
instance (Monoid a, Monoid b) => Monoid (a, b)
instance Monoid a => Monoid (Const a b)
instance ( * - > Constraint ) Monoid
instance (Monoid a, Monoid b, Monoid c) => Monoid (a, b, c)
instance (Monoid a, Monoid b, Monoid c, Monoid d) => Monoid (a, b, c, d)
instance (Monoid a, Monoid b, Monoid c, Monoid d, Monoid e) => Monoid (a, b, c, d, e)
data Dual a
data Endo a
data All
data Any
data Sum a
data Product a
data First a
data Last a
data Alt f a
|
cfb761a46970ee5b44053f74fc18d1c4a78e356ce4429da3bd01cbc74c695adb | candera/vmt | main.cljs | (ns app.main
(:require [weathergen.ipc :as ipc]
[weathergen.falcon.install :as install]))
;; Electron interop handles, resolved once at namespace load time.
(def electron (js/require "electron"))
(def app (.-app electron))
(def path (js/require "path"))
(def BrowserWindow (.-BrowserWindow electron))
(def ipcMain (.-ipcMain electron))

;; Compile-time flag, overridable via :clojure-defines (see load-page).
(goog-define dev? false)
(defn load-page
  "When compiling with `:none` the compiled JS that calls .loadURL is
  in a different place than it would be when compiling with optimizations
  that produce a single artifact (`:whitespace, :simple, :advanced`).
  Because of this we need to dispatch the loading based on the used
  optimizations, for this we defined `dev?` above that we can override
  at compile time using the `:clojure-defines` compiler option."
  [window page]
  ;; Resolve the page relative to the app bundle so the same path works in
  ;; both source checkouts and packaged builds.
  (.loadURL window (str "file://"
                        (.join path (.getAppPath app) page)))
  ;; Dead code kept for reference: the previous dev/prod URL split.
  #_(if dev?
      (.loadURL window (str "file://" js/__dirname "/../../index.html"))
      (.loadURL window (str "file://" js/__dirname "/index.html"))))
;; Main application window, open mission windows (keyed by webContents id -
;; see `open` and the "load-complete" handler), and located Falcon installs.
(def app-window (atom nil))
(def mission-windows (atom {}))
(def installations (atom {}))
;; Construct a centered BrowserWindow of the given size; `frame?` toggles the
;; OS window chrome and `show?` controls initial visibility.
(defn mk-window [w h frame? show?]
  (BrowserWindow. #js {:width w
                       :height h
                       :frame frame?
                       :show show?
                       :icon "images/1stVFW_Insignia.png"
                       :center true}))
(defn open
  "Kicks off a renderer opening the thing at `path`. `mode` tells us
  whether it's a `:briefing` or a `:mission`."
  [path mode]
  (let [mission-window (mk-window 1300 800 true false)
        ;; Capture the webContents id now so the same key is used for both
        ;; registration and removal.
        window-id (-> mission-window .-webContents .-id)]
    (swap! mission-windows assoc window-id mission-window)
    (load-page mission-window "mission.html")
    (-> mission-window
        .-webContents
        (.on "did-finish-load"
             (fn []
               (when (-> js/process.env (aget "VMT_DEV_OPEN_WINDOW_EARLY") some?)
                 (.show mission-window))
               (ipc/send-to-renderer mission-window
                                     (mode {:briefing "open-briefing"
                                            :mission "open-mission"})
                                     @installations
                                     path))))
    ;; Open target="_blank" style links in the system browser instead of a
    ;; new Electron window.
    (-> mission-window
        .-webContents
        (.on "new-window"
             (fn [e url]
               (-> "electron"
                   js/require
                   .-shell
                   (.openExternal url))
               (.preventDefault e))))
    ;; BUG FIX: entries are keyed by webContents id, but were previously
    ;; removed with `path`, so closed windows leaked in mission-windows.
    (.on mission-window "closed" #(swap! mission-windows dissoc window-id))))
;; IPC: a renderer asks the main process to open a mission/briefing window.
(defmethod ipc/on-message "open-mission"
  [_ event path]
  (open path :mission))

(defmethod ipc/on-message "open-briefing"
  [_ event path]
  (open path :briefing))

;; IPC: forward progress messages from mission windows to the app window.
(defmethod ipc/on-message "progress-message"
  [_ event message]
  (ipc/send-to-renderer @app-window "progress-message" message))

;; IPC: a mission window finished loading - look it up by the sender's
;; webContents id, reveal it (windows are created hidden), notify the app.
(defmethod ipc/on-message "load-complete"
  [_ event]
  (->> event .-sender .-id (get @mission-windows) .show)
  (ipc/send-to-renderer @app-window "load-complete"))

;; IPC: loading failed - log and forward the error to the app window.
(defmethod ipc/on-message "load-failed"
  [_ event err]
  (.log js/console "Load failed" err)
  (ipc/send-to-renderer @app-window "load-failed" err))
;; Create the main application window (and, when VMT_DEV_SHOW_TEST_WINDOW is
;; set, an extra test window); windows are shown only once their page loaded.
(defn init-browser []
  (reset! app-window (mk-window 800 600 true false))
  (when (-> js/process.env (aget "VMT_DEV_SHOW_TEST_WINDOW") some?)
    (let [test-window (mk-window 1300 800 true false)]
      (load-page test-window "test.html")
      (-> test-window .-webContents (.on "did-finish-load" (fn [] (.show test-window))))))
  (load-page @app-window "index.html")
  (-> @app-window .-webContents (.on "did-finish-load" (fn [] (.show @app-window))))
  ;; Shows how to make a child window that's always on top of the app window:
  #_(BrowserWindow. #js {:width 300 :height 200
                         :backgroundColor "yellow"
                         :title "detail"
                         :show true :parent @app-window})
  (when dev?
    ;; TODO: Anything we want to do differently?
    )
  ;; Drop the reference once the window is gone.
  (.on @app-window "closed" #(reset! app-window nil))
  )
;; TODO: Maybe show splash screen
;; Electron entry point: quit when all windows close (except on macOS, per
;; platform convention), and locate Falcon installations before building UI.
(defn init []
  (.on app "window-all-closed" #(when-not (= js/process.platform "darwin") (.quit app)))
  (.on app "ready" #(install/locate-installations
                      (fn [installs]
                        (reset! installations installs)
                        (init-browser))))
  ;; nodejs target requires *main-cli-fn*; all work happens in app events.
  (set! *main-cli-fn* (fn [] nil)))
| null | https://raw.githubusercontent.com/candera/vmt/8cf450e6c34af87d748152afd7f547b92ae9b38e/src/app/main.cljs | clojure | Shows how to make a child window that's always on top of the app window:
TODO: Anything we want to do differently?
TODO: Maybe show splash screen | (ns app.main
(:require [weathergen.ipc :as ipc]
[weathergen.falcon.install :as install]))
(def electron (js/require "electron"))
(def app (.-app electron))
(def path (js/require "path"))
(def BrowserWindow (.-BrowserWindow electron))
(def ipcMain (.-ipcMain electron))
(goog-define dev? false)
(defn load-page
"When compiling with `:none` the compiled JS that calls .loadURL is
in a different place than it would be when compiling with optimizations
that produce a single artifact (`:whitespace, :simple, :advanced`).
Because of this we need to dispatch the loading based on the used
optimizations, for this we defined `dev?` above that we can override
at compile time using the `:clojure-defines` compiler option."
[window page]
(.loadURL window (str "file://"
(.join path (.getAppPath app) page)))
#_(if dev?
(.loadURL window (str "file://" js/__dirname "/../../index.html"))
(.loadURL window (str "file://" js/__dirname "/index.html"))))
(def app-window (atom nil))
(def mission-windows (atom {}))
(def installations (atom {}))
(defn mk-window [w h frame? show?]
(BrowserWindow. #js {:width w
:height h
:frame frame?
:show show?
:icon "images/1stVFW_Insignia.png"
:center true}))
(defn open
"Kicks off a renderer opening the thing at `path`. `mode` tells us
whether it's a `:briefing` or a `:mission`."
[path mode]
(let [mission-window (mk-window 1300 800 true false)]
(swap! mission-windows assoc (-> mission-window .-webContents .-id) mission-window)
(load-page mission-window "mission.html")
(-> mission-window
.-webContents
(.on "did-finish-load"
(fn []
(when (-> js/process.env (aget "VMT_DEV_OPEN_WINDOW_EARLY") some?)
(.show mission-window))
(ipc/send-to-renderer mission-window
(mode {:briefing "open-briefing"
:mission "open-mission"})
@installations
path))))
(-> mission-window
.-webContents
(.on "new-window"
(fn [e url]
(-> "electron"
js/require
.-shell
(.openExternal url))
(.preventDefault e))))
(.on mission-window "closed" #(swap! mission-windows dissoc path))))
(defmethod ipc/on-message "open-mission"
[_ event path]
(open path :mission))
(defmethod ipc/on-message "open-briefing"
[_ event path]
(open path :briefing))
(defmethod ipc/on-message "progress-message"
[_ event message]
(ipc/send-to-renderer @app-window "progress-message" message))
(defmethod ipc/on-message "load-complete"
[_ event]
(->> event .-sender .-id (get @mission-windows) .show)
(ipc/send-to-renderer @app-window "load-complete"))
(defmethod ipc/on-message "load-failed"
[_ event err]
(.log js/console "Load failed" err)
(ipc/send-to-renderer @app-window "load-failed" err))
(defn init-browser []
(reset! app-window (mk-window 800 600 true false))
(when (-> js/process.env (aget "VMT_DEV_SHOW_TEST_WINDOW") some?)
(let [test-window (mk-window 1300 800 true false)]
(load-page test-window "test.html")
(-> test-window .-webContents (.on "did-finish-load" (fn [] (.show test-window))))))
(load-page @app-window "index.html")
(-> @app-window .-webContents (.on "did-finish-load" (fn [] (.show @app-window))))
#_(BrowserWindow. #js {:width 300 :height 200
:backgroundColor "yellow"
:title "detail"
:show true :parent @app-window})
(when dev?
)
(.on @app-window "closed" #(reset! app-window nil))
)
(defn init []
(.on app "window-all-closed" #(when-not (= js/process.platform "darwin") (.quit app)))
(.on app "ready" #(install/locate-installations
(fn [installs]
(reset! installations installs)
(init-browser))))
(set! *main-cli-fn* (fn [] nil)))
|
31e0b17277df2fd2767e85ac4dc0038cf7bb31492a2de7b9ee1f0c779d32537d | georepl/georepl | main.clj | (ns georepl.main
(:require [clojure.java.io :as io]
[clojure.string :as str]
[georepl.draw-framework :as frame]
[georepl.shapes :as shapes]
[georepl.mathlib :as math]
[georepl.elements :as elements]
[georepl.user :as user]
[georepl.configuration :as config])
(:gen-class))
;; read drawing from a file
(defn- file2drawing [filename]
(let [drw (read-string
(slurp (apply str (concat
(:drawings-directory config/Configuration)
filename))))
p-ref (math/vec-scal-mult 0.5 (:size drw))]
(assoc drw :filename filename
:p-ref p-ref)))
;; scale a drawing so it fits in a given icon box
(defn- drawing2size [drawing icon-box-size]
(let [[cur-x cur-y] (:size drawing)
ratio (max (/ (first icon-box-size) cur-x)(/ (second icon-box-size) cur-y))
frame [(shapes/constructText (:filename drawing) [0 cur-y][cur-x (* 1.1 cur-y)])
(shapes/constructLine [0 0] [cur-x 0])
(shapes/constructLine [cur-x 0] [cur-x cur-y])
(shapes/constructLine [cur-x cur-y][0 cur-y])
(shapes/constructLine [0 cur-y][0 0])]
frm (assoc (shapes/constructCompound frame)
:p-ref (math/vec-scal-mult 0.5 (:size drawing))
:subtype :frame)]
(shapes/scale
(assoc drawing :elems (vec (cons frm (:elems drawing))))
ratio)))
;; order a list of icons to fit in a box of the given size
(defn- order-icons [icons size]
scale drawing to 0.25 of its original size and leave some margin ( 10 % )
icons-per-line (int (/ (first size) (max icon-len 1)))
dist (/ (first size) icons-per-line)
p-center-list (map
#(math/vec-add
(math/vec-scal-mult 0.5 [dist dist])
(math/vec-scal-mult dist [(mod % icons-per-line)(int (/ % icons-per-line))]))
(range (count icons)))]
(map
#(shapes/translate %1 (math/vec-sub %2 (:p-ref %1)))
icons p-center-list)))
;; helper to find a number as part of a string
(defn- str2int [s]
(let [sNum (re-find #"[0-9]*" s)]
(if (empty? sNum)
nil
(Integer/parseInt sNum))))
create a new unique filename . The file name pattern is < root from config><first new number>.grl
(defn- new-unique-filename [files]
(let [name-root (:new-drawing-name-root config/Configuration)
temp-files (filter #(str/starts-with? % name-root) files)]
(if (empty? temp-files)
(format "%s0.grl" name-root)
(let [len (count name-root)
s-append-list (map #(apply str (drop len %)) temp-files)]
(if (empty? s-append-list)
(format "%s0.grl" name-root 1)
(let [coll (keep str2int s-append-list)
free-num (last
(first
(drop-while
(comp not first)
(map #(list (not-any? (partial = %) coll) %) (range)))))]
(format "%s%d.grl" name-root free-num)))))))
;; create an empty drawing to start with
(defn- create-new-drawing [size files]
(let [filename (new-unique-filename files)]
(assoc (shapes/constructCompound [])
:subtype :drawing
:p-ref (math/vec-scal-mult 0.5 size)
:filename filename
:size size)))
;select a drawing from the ones in the working directory (defined in configs)
(defn- select-drawing [all-files size]
select drawing if available or return nil to start with empty one
(let [files (filter #(= (take 4 (reverse %)) '(\l \r \g \.)) all-files)]
(if (empty? files)
nil
(let [tmp (create-new-drawing [800 800] files)
dummy (assoc tmp :filename (format "<%s>" (:filename tmp)))
drawings (cons
dummy
make dummy ( empty drawing ) first element
icon-box-size (math/vec-scal-mult (* 0.9 0.25) size)
icons (map #(drawing2size % icon-box-size) drawings)
elems (order-icons icons size)]
(if (empty? elems)
nil
(let [drw (assoc (shapes/constructCompound (vec elems))
:subtype :drawing
:p-ref (math/vec-scal-mult 0.5 size))]
(do
(elements/push-elem drw)
drw)))))))
start a new quil sketch and reinitialize the elements stack with a new ( empty ) drawing
(defn- start-new-drawing [size files]
(frame/init-frame-gui)
(elements/clear)
(let [[server upd-f](user/start)]
(elements/register upd-f))
(elements/push-elem (create-new-drawing size files)))
;; Start gui frame with the selected drawing. This code is injected into draw-framework
;; and is started when the gallery frame is done (synchronization).
(defn- start-existing-drawing [filename]
(let [size [800 800]
files (.list (io/file (:drawings-directory config/Configuration)))]
(if (some (or (partial = \<)(partial = \>)) filename)
(start-new-drawing size files)
(let [drw (file2drawing filename)]
(elements/push-elem drw)
(frame/init-frame-gui)
(user/start)))))
(defn -main [& args]
(frame/init-renderer :quil)
(let [size [600 600]
files (.list (io/file (:drawings-directory config/Configuration)))]
(if-let [drw-list (select-drawing files size)]
;; start gallery
(frame/init-frame-gallery drw-list start-existing-drawing)
(start-new-drawing size files))))
;; start the show ...
;(-main)
| null | https://raw.githubusercontent.com/georepl/georepl/1502ae04bbc3cab757000714008ccecc4e9e571b/src/georepl/main.clj | clojure | read drawing from a file
scale a drawing so it fits in a given icon box
order a list of icons to fit in a box of the given size
helper to find a number as part of a string
create an empty drawing to start with
select a drawing from the ones in the working directory (defined in configs)
Start gui frame with the selected drawing. This code is injected into draw-framework
and is started when the gallery frame is done (synchronization).
start gallery
start the show ...
(-main) | (ns georepl.main
(:require [clojure.java.io :as io]
[clojure.string :as str]
[georepl.draw-framework :as frame]
[georepl.shapes :as shapes]
[georepl.mathlib :as math]
[georepl.elements :as elements]
[georepl.user :as user]
[georepl.configuration :as config])
(:gen-class))
(defn- file2drawing [filename]
(let [drw (read-string
(slurp (apply str (concat
(:drawings-directory config/Configuration)
filename))))
p-ref (math/vec-scal-mult 0.5 (:size drw))]
(assoc drw :filename filename
:p-ref p-ref)))
(defn- drawing2size [drawing icon-box-size]
(let [[cur-x cur-y] (:size drawing)
ratio (max (/ (first icon-box-size) cur-x)(/ (second icon-box-size) cur-y))
frame [(shapes/constructText (:filename drawing) [0 cur-y][cur-x (* 1.1 cur-y)])
(shapes/constructLine [0 0] [cur-x 0])
(shapes/constructLine [cur-x 0] [cur-x cur-y])
(shapes/constructLine [cur-x cur-y][0 cur-y])
(shapes/constructLine [0 cur-y][0 0])]
frm (assoc (shapes/constructCompound frame)
:p-ref (math/vec-scal-mult 0.5 (:size drawing))
:subtype :frame)]
(shapes/scale
(assoc drawing :elems (vec (cons frm (:elems drawing))))
ratio)))
(defn- order-icons [icons size]
scale drawing to 0.25 of its original size and leave some margin ( 10 % )
icons-per-line (int (/ (first size) (max icon-len 1)))
dist (/ (first size) icons-per-line)
p-center-list (map
#(math/vec-add
(math/vec-scal-mult 0.5 [dist dist])
(math/vec-scal-mult dist [(mod % icons-per-line)(int (/ % icons-per-line))]))
(range (count icons)))]
(map
#(shapes/translate %1 (math/vec-sub %2 (:p-ref %1)))
icons p-center-list)))
(defn- str2int [s]
(let [sNum (re-find #"[0-9]*" s)]
(if (empty? sNum)
nil
(Integer/parseInt sNum))))
create a new unique filename . The file name pattern is < root from config><first new number>.grl
(defn- new-unique-filename [files]
(let [name-root (:new-drawing-name-root config/Configuration)
temp-files (filter #(str/starts-with? % name-root) files)]
(if (empty? temp-files)
(format "%s0.grl" name-root)
(let [len (count name-root)
s-append-list (map #(apply str (drop len %)) temp-files)]
(if (empty? s-append-list)
(format "%s0.grl" name-root 1)
(let [coll (keep str2int s-append-list)
free-num (last
(first
(drop-while
(comp not first)
(map #(list (not-any? (partial = %) coll) %) (range)))))]
(format "%s%d.grl" name-root free-num)))))))
(defn- create-new-drawing [size files]
(let [filename (new-unique-filename files)]
(assoc (shapes/constructCompound [])
:subtype :drawing
:p-ref (math/vec-scal-mult 0.5 size)
:filename filename
:size size)))
(defn- select-drawing [all-files size]
select drawing if available or return nil to start with empty one
(let [files (filter #(= (take 4 (reverse %)) '(\l \r \g \.)) all-files)]
(if (empty? files)
nil
(let [tmp (create-new-drawing [800 800] files)
dummy (assoc tmp :filename (format "<%s>" (:filename tmp)))
drawings (cons
dummy
make dummy ( empty drawing ) first element
icon-box-size (math/vec-scal-mult (* 0.9 0.25) size)
icons (map #(drawing2size % icon-box-size) drawings)
elems (order-icons icons size)]
(if (empty? elems)
nil
(let [drw (assoc (shapes/constructCompound (vec elems))
:subtype :drawing
:p-ref (math/vec-scal-mult 0.5 size))]
(do
(elements/push-elem drw)
drw)))))))
start a new quil sketch and reinitialize the elements stack with a new ( empty ) drawing
(defn- start-new-drawing [size files]
(frame/init-frame-gui)
(elements/clear)
(let [[server upd-f](user/start)]
(elements/register upd-f))
(elements/push-elem (create-new-drawing size files)))
(defn- start-existing-drawing [filename]
(let [size [800 800]
files (.list (io/file (:drawings-directory config/Configuration)))]
(if (some (or (partial = \<)(partial = \>)) filename)
(start-new-drawing size files)
(let [drw (file2drawing filename)]
(elements/push-elem drw)
(frame/init-frame-gui)
(user/start)))))
(defn -main [& args]
(frame/init-renderer :quil)
(let [size [600 600]
files (.list (io/file (:drawings-directory config/Configuration)))]
(if-let [drw-list (select-drawing files size)]
(frame/init-frame-gallery drw-list start-existing-drawing)
(start-new-drawing size files))))
|
86c0e5725b92dec107b6fccea80500558f146efc2b03f9a5c4ccb0b4e968e446 | kmi/irs | trusted-travel-suite.lisp | , 2007
;;; Testing material for the trusted virtual travel agent application.
(in-package :irs.tests)
(def-suite trusted-travel-suite
:description "Tests for the trusted virtual travel agent application.")
(in-suite trusted-travel-suite)
(test goal-invocation-test
(is (string-equal
"(
Applicable Web Services:
<br/>GET-TRAIN-TIMETABLE-SERVICE-T3
<br/>GET-TRAIN-TIMETABLE-SERVICE-T2
<br/>GET-TRAIN-TIMETABLE-SERVICE-T1
<br/><br/> The WS class that matches with STEFANIA trust requirements is : GET-TRAIN-TIMETABLE-SERVICE-T2
<br/><br/>The result is:
<br/> \"Timetable of trains from Frankfurt to Berlin on 1, January, 6
6:47
7:35
8:23
9:11
9:59
10:47
11:35
12:23
13:11
13:59
14:47
15:35
16:23
17:11
17:59
18:47
19:35
20:23
21:11
21:59
22:47\")"
(with-output-to-string (str)
(ip::raw-trusted-irs-achieve-goal 'ocml::stefania
'ocml::TRUST-heuristic-classification
'ocml::GET-TRAIN-TIMETABLE-TRUSTED-GOAL
'((ocml::HAS-DEPARTURE-STATION ocml::FRANKFURT)
(ocml::HAS-DESTINATION-STATION ocml::BERLIN)
(ocml::HAS-DATE-AND-TIME (1 1 6)))
str nil t)))))
| null | https://raw.githubusercontent.com/kmi/irs/e1b8d696f61c6b6878c0e92d993ed549fee6e7dd/tests/trusted-travel-suite.lisp | lisp | Testing material for the trusted virtual travel agent application. | , 2007
(in-package :irs.tests)
(def-suite trusted-travel-suite
:description "Tests for the trusted virtual travel agent application.")
(in-suite trusted-travel-suite)
(test goal-invocation-test
(is (string-equal
"(
Applicable Web Services:
<br/>GET-TRAIN-TIMETABLE-SERVICE-T3
<br/>GET-TRAIN-TIMETABLE-SERVICE-T2
<br/>GET-TRAIN-TIMETABLE-SERVICE-T1
<br/><br/> The WS class that matches with STEFANIA trust requirements is : GET-TRAIN-TIMETABLE-SERVICE-T2
<br/><br/>The result is:
<br/> \"Timetable of trains from Frankfurt to Berlin on 1, January, 6
6:47
7:35
8:23
9:11
9:59
10:47
11:35
12:23
13:11
13:59
14:47
15:35
16:23
17:11
17:59
18:47
19:35
20:23
21:11
21:59
22:47\")"
(with-output-to-string (str)
(ip::raw-trusted-irs-achieve-goal 'ocml::stefania
'ocml::TRUST-heuristic-classification
'ocml::GET-TRAIN-TIMETABLE-TRUSTED-GOAL
'((ocml::HAS-DEPARTURE-STATION ocml::FRANKFURT)
(ocml::HAS-DESTINATION-STATION ocml::BERLIN)
(ocml::HAS-DATE-AND-TIME (1 1 6)))
str nil t)))))
|
63bb419871c93c422da345ea7bdbd226785e59e880f1463fa8445a20f6db6c6b | sboehler/servant-starter-app | Entity.hs | module Types.Entity
( Entity(..)
, Id
) where
import Data.Aeson (ToJSON, (.=), object, toJSON)
import Database.PostgreSQL.Simple.FromField (FromField(..))
import Database.PostgreSQL.Simple.FromRow (FromRow, field, fromRow)
import Database.PostgreSQL.Simple.ToField (ToField(..))
import Database.PostgreSQL.Simple.ToRow (ToRow, toRow)
-- A type family for database ids
type family Id a
-- A wrapper for models
data Entity model = Entity
{ entityId :: Id model
, entityModel :: model
}
deriving instance
(Show (Id model), Show model) => Show (Entity model)
instance (Eq (Id model)) => Eq (Entity model) where
e1 == e2 = entityId e1 == entityId e2
instance (ToField (Id model), ToRow model) => ToRow (Entity model) where
toRow (Entity i m) = toField i : toRow m
instance (FromField (Id model), FromRow model) => FromRow (Entity model) where
fromRow = Entity <$> field <*> fromRow
instance (ToJSON (Id model), ToJSON model) => ToJSON (Entity model) where
toJSON e = object ["id" .= toJSON (entityId e), "model" .= toJSON (entityModel e)]
| null | https://raw.githubusercontent.com/sboehler/servant-starter-app/ebcd90ba0ec8e7ed411ebeb5ef2119e6000cc398/src/Types/Entity.hs | haskell | A type family for database ids
A wrapper for models | module Types.Entity
( Entity(..)
, Id
) where
import Data.Aeson (ToJSON, (.=), object, toJSON)
import Database.PostgreSQL.Simple.FromField (FromField(..))
import Database.PostgreSQL.Simple.FromRow (FromRow, field, fromRow)
import Database.PostgreSQL.Simple.ToField (ToField(..))
import Database.PostgreSQL.Simple.ToRow (ToRow, toRow)
type family Id a
data Entity model = Entity
{ entityId :: Id model
, entityModel :: model
}
deriving instance
(Show (Id model), Show model) => Show (Entity model)
instance (Eq (Id model)) => Eq (Entity model) where
e1 == e2 = entityId e1 == entityId e2
instance (ToField (Id model), ToRow model) => ToRow (Entity model) where
toRow (Entity i m) = toField i : toRow m
instance (FromField (Id model), FromRow model) => FromRow (Entity model) where
fromRow = Entity <$> field <*> fromRow
instance (ToJSON (Id model), ToJSON model) => ToJSON (Entity model) where
toJSON e = object ["id" .= toJSON (entityId e), "model" .= toJSON (entityModel e)]
|
b63acd1475f23e90cd42171b5bb6246c15f12f8c7a55cc574d4924c9057afcc6 | tdammers/sprinkles | TemplateContext.hs | # LANGUAGE DeriveGeneric #
# LANGUAGE NoImplicitPrelude #
{-#LANGUAGE OverloadedStrings #-}
# LANGUAGE OverloadedLists #
# LANGUAGE LambdaCase #
# LANGUAGE ScopedTypeVariables #
# LANGUAGE FlexibleInstances #
# LANGUAGE FlexibleContexts #
# LANGUAGE MultiParamTypeClasses #
# LANGUAGE TypeApplications #
module Web.Sprinkles.TemplateContext
where
import Web.Sprinkles.Prelude
import Text.Ginger
(parseGinger, Template, runGingerT, GingerContext, GVal(..), ToGVal(..),
(~>))
import Text.Ginger.Html
(unsafeRawHtml, html)
import qualified Text.Ginger as Ginger
import qualified Data.Yaml as YAML
import Data.Aeson (ToJSON (..), FromJSON (..))
import qualified Data.Aeson as JSON
import qualified Data.Aeson.Types as JSON
import qualified Data.Aeson.Encode.Pretty as JSON
import Data.Default (Default, def)
import Data.Text (Text)
import qualified Data.Text as Text
import System.Locale.Read (getLocale)
import qualified Text.Pandoc as Pandoc
import Text.Pandoc (PandocMonad)
import qualified Text.Pandoc.Readers.CustomCreole as PandocCreole
import qualified Data.ByteString.UTF8 as UTF8
import qualified Data.ByteString.Lazy.UTF8 as LUTF8
import Data.ByteString.Builder (stringUtf8)
import qualified Network.Wai as Wai
import qualified Data.CaseInsensitive as CI
import Network.HTTP.Types.URI (queryToQueryText)
import qualified Crypto.KDF.BCrypt as BCrypt
import Control.Monad.Except (throwError)
import System.IO.Temp
import System.Process (callProcess)
import System.Directory (doesFileExist)
import qualified Data.ByteString.Base64 as Base64
import Data.Char (isDigit)
import GHC.Stack
import Web.Sprinkles.Pandoc (pandocReaderOptions)
import Web.Sprinkles.Backends
import Web.Sprinkles.Backends.Spec (backendSpecFromJSON)
import Web.Sprinkles.Exceptions
import Web.Sprinkles.Logger as Logger
import Web.Sprinkles.Backends.Loader.Type
(RequestContext (..), pbsFromRequest, pbsInvalid)
import Web.Sprinkles.SessionHandle
import Data.RandomString (randomStr)
import Text.Printf (printf)
sprinklesGingerContext :: RawBackendCache
-> Wai.Request
-> Maybe SessionHandle
-> Logger
-> IO (HashMap Text (GVal (Ginger.Run p IO h)))
sprinklesGingerContext cache request session logger = do
csrfTokenMay <- case session of
Nothing -> return Nothing
Just handle -> sessionGet handle "csrf"
writeLog logger Debug . pack . printf "CSRF token: %s" . show $ csrfTokenMay
let csrfTokenInput = case csrfTokenMay of
Just token ->
mconcat
[ unsafeRawHtml "<input type=\"hidden\" name=\"__form_token\" value=\""
, html token
, unsafeRawHtml "\"/>"
]
Nothing ->
unsafeRawHtml "<!-- no form token defined -->"
return $ mapFromList
[ "request" ~> request
, "session" ~> session
, "formToken" ~> csrfTokenMay
, "formTokenInput" ~> csrfTokenInput
, ("load", Ginger.fromFunction (gfnLoadBackendData (writeLog logger) cache))
] <> baseGingerContext logger
baseGingerContext :: Logger
-> HashMap Text (GVal (Ginger.Run p IO h))
baseGingerContext logger =
mapFromList
[ ("ellipse", Ginger.fromFunction gfnEllipse)
, ("json", Ginger.fromFunction gfnJSON)
, ("yaml", Ginger.fromFunction gfnYAML)
, ("getlocale", Ginger.fromFunction (gfnGetLocale (writeLog logger)))
, ("pandoc", Ginger.fromFunction (gfnPandoc (writeLog logger)))
, ("markdown", Ginger.fromFunction (gfnPandocAlias "markdown" (writeLog logger)))
, ("textile", Ginger.fromFunction (gfnPandocAlias "textile" (writeLog logger)))
, ("rst", Ginger.fromFunction (gfnPandocAlias "rst" (writeLog logger)))
, ("creole", Ginger.fromFunction (gfnPandocAlias "creole" (writeLog logger)))
, ("bcrypt", gnsBCrypt)
, ("randomStr", Ginger.fromFunction gfnRandomStr)
, ("lilypond", Ginger.fromFunction (gfnLilypond (writeLog logger)))
, ("dataurl", Ginger.fromFunction (gfnDataUrl (writeLog logger)))
]
gnsBCrypt :: GVal (Ginger.Run p IO h)
gnsBCrypt =
Ginger.dict
[ ("hash", Ginger.fromFunction gfnBCryptHash)
, ("validate", Ginger.fromFunction gfnBCryptValidate)
]
gfnBCryptHash :: Ginger.Function (Ginger.Run p IO h)
gfnBCryptHash args = do
let argSpec :: [(Text, Ginger.GVal (Ginger.Run p IO h))]
argSpec = [ ("password", def)
, ("cost", toGVal (4 :: Int))
, ("algorithm", toGVal ("$2y$" :: Text))
]
case Ginger.extractArgsDefL argSpec args of
Right [passwordG, costG, algorithmG] -> do
let password = encodeUtf8 . Ginger.asText $ passwordG
algorithm = encodeUtf8 . Ginger.asText $ algorithmG
cost <- maybe
(throwM $ GingerInvalidFunctionArgs "bcrypt.hash" "int cost")
(return . ceiling)
(asNumber costG)
hash :: ByteString <- liftIO $ BCrypt.hashPassword cost password
return . toGVal . decodeUtf8 $ hash
_ -> throwM $ GingerInvalidFunctionArgs "bcrypt.hash" "string password, int cost, string algorithm"
gfnRandomStr :: Ginger.Function (Ginger.Run p IO h)
gfnRandomStr args = do
let defaultAlphabet = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] :: String
argSpec :: [(Text, Ginger.GVal (Ginger.Run p IO h))]
argSpec = [ ("length", toGVal (8 :: Int))
, ("alphabet", toGVal defaultAlphabet)
]
case Ginger.extractArgsDefL argSpec args of
Right [lengthG, alphabetG] -> do
desiredLength :: Int <- case fmap round . asNumber $ lengthG of
Nothing -> throwM $ GingerInvalidFunctionArgs "randomStr" "int length"
Just l -> return l
let alphabet :: String
alphabet = unpack . Ginger.asText $ alphabetG
when (null alphabet)
(throwM $ GingerInvalidFunctionArgs "randomStr" "alphabet too small")
liftIO $ toGVal <$> randomStr alphabet desiredLength
_ -> throwM $ GingerInvalidFunctionArgs "randomStr" "int length, string alphabet"
gfnBCryptValidate :: Ginger.Function (Ginger.Run p IO h)
gfnBCryptValidate args = do
let argSpec :: [(Text, Ginger.GVal (Ginger.Run p IO h))]
argSpec = [ ("hash", def)
, ("password", def)
]
case Ginger.extractArgsDefL argSpec args of
Right [hashG, passwordG] -> do
let hash = encodeUtf8 . Ginger.asText $ hashG
password = encodeUtf8 . Ginger.asText $ passwordG
return . toGVal $ BCrypt.validatePassword hash password
_ -> throwM $ GingerInvalidFunctionArgs "bcrypt.validate" "string password, int cost, string algorithm"
gfnLilypond :: (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnLilypond writeLog args = liftIO . catchToGinger writeLog $ do
case Ginger.extractArgsDefL [("src", ""), ("dpi", "144"), ("width", "120mm"), ("inline", "true"), ("raw", "")] args of
Right [srcG, dpiG, widthG, inlineG, rawG] -> do
let dpi = fromMaybe 0 $ Ginger.asNumber dpiG
width = parseWidth . Ginger.asText $ widthG
inline = Ginger.asBoolean inlineG
raw = Ginger.asBoolean rawG
parseWidth xs =
let numeric = takeWhile isDigit xs
unit = dropWhile isDigit xs
in numeric <> "\\" <> unit
let rawSrc = Ginger.asText srcG
src = if inline then
"\\paper {\n" <>
" indent=0\\mm\n" <>
" line-width=" <> width <> "\n" <>
" oddFooterMarkup=##f\n" <>
" oddHeaderMarkup=##f\n" <>
" bookTitleMarkup = ##f\n" <>
" scoreTitleMarkup = ##f\n" <>
"}\n" <>
rawSrc
else
rawSrc
dir <- getCanonicalTemporaryDirectory
let hash = sha1 (encodeUtf8 . fromStrict $ src <> Ginger.asText dpiG)
let rawFilename = dir </> hash
lyFilename = rawFilename <.> "ly"
pngFilename = rawFilename <.> "png"
doesFileExist pngFilename >>= flip unless (do
writeFile lyFilename src
callProcess "lilypond"
[ "--png", "-dsafe"
, "-dbackend=eps"
, "-dno-gs-load-fonts"
, "-dinclude-eps-fonts"
, "-dpixmap-format=pngalpha"
, "-dresolution=" <> show dpi
, "-o", rawFilename
, lyFilename
]
)
png <- readFile pngFilename
if raw then
return . toGVal $ png
else do
let dataUrl = decodeUtf8 $ "data:image/png;base64," <> Base64.encode png
return . toGVal $ dataUrl
s ->
throwM $ GingerInvalidFunctionArgs "lilypond" "string src, int dpi=144, string width='120mm'"
gfnDataUrl :: forall p h. (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnDataUrl writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("body", ""), ("type", def)] args of
Right [bodyG, contentTypeG] -> do
let read = Ginger.asFunction =<<
Ginger.lookupKey "read" =<<
(Ginger.lookupKey "bytes" bodyG)
let bodyBytes = fromMaybe "" $ Ginger.asBytes bodyG
let contentType =
if Ginger.isNull contentTypeG then
"application/octet-stream"
else
Ginger.asText contentTypeG
return . toGVal $
"data:" <>
contentType <>
";base64," <>
(decodeUtf8 . Base64.encode $ bodyBytes)
_ ->
throwM $ GingerInvalidFunctionArgs "dataurl" "string body, string type"
gfnPandoc :: forall p h. (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnPandoc writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("src", ""), ("reader", "markdown")] args of
Right [src, readerName] -> do
toGVal <$> pandoc (Ginger.asText readerName) (Ginger.asText src)
_ ->
throwM $ GingerInvalidFunctionArgs "pandoc" "string src, string reader"
gfnPandocAlias :: forall p h. Text -> (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnPandocAlias readerName writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("src", "")] args of
Right [src] ->
toGVal <$> pandoc readerName (Ginger.asText src)
_ ->
throwM $ GingerInvalidFunctionArgs readerName "string src"
pandoc :: Text -> Text -> IO Pandoc.Pandoc
pandoc readerName src = do
reader <- either
(\err -> fail $ "Invalid reader: " ++ show err)
return
(Pandoc.runPure $ getReader readerName)
let read = case reader of
Pandoc.TextReader r ->
r pandocReaderOptions
Pandoc.ByteStringReader r ->
r pandocReaderOptions . encodeUtf8 . fromStrict
(pure . Pandoc.runPure . read $ src) >>= either
(\err -> fail $ "Reading " ++ show readerName ++ " failed: " ++ show err)
return
where
getReader :: Text -> Pandoc.PandocPure (Pandoc.Reader Pandoc.PandocPure)
getReader "creole-tdammers" = fst <$> customCreoleReader
getReader readerName = fst <$> Pandoc.getReader readerName
customCreoleReader :: forall m. PandocMonad m => m (Pandoc.Reader m, Pandoc.Extensions)
customCreoleReader =
return (Pandoc.TextReader $ reader, Pandoc.emptyExtensions)
where
reader :: Pandoc.ReaderOptions -> Text -> m Pandoc.Pandoc
reader opts src =
either throwError return $
PandocCreole.readCustomCreole opts (unpack src)
gfnGetLocale :: forall p h. (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnGetLocale writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("category", "LC_TIME"), ("locale", "")] args of
Right [gCat, gName] ->
case (Ginger.asText gCat, Text.unpack . Ginger.asText $ gName) of
("LC_TIME", "") -> toGVal <$> getLocale Nothing
("LC_TIME", localeName) -> toGVal <$> getLocale (Just localeName)
(cat, localeName) -> return def -- valid call, but category not implemented
_ -> throwM $ GingerInvalidFunctionArgs "getlocale" "string category, string name"
gfnEllipse :: Ginger.Function (Ginger.Run p IO h)
gfnEllipse [] = return def
gfnEllipse [(Nothing, str)] =
gfnEllipse [(Nothing, str), (Nothing, toGVal (100 :: Int))]
gfnEllipse [(Nothing, str), (Nothing, len)] = do
let txt = Ginger.asText str
actualLen = Web.Sprinkles.Prelude.length txt
targetLen = fromMaybe 100 $ ceiling <$> Ginger.asNumber len
txt' = if actualLen + 3 > targetLen
then take (targetLen - 3) txt <> "..."
else txt
return . toGVal $ txt'
gfnEllipse ((Nothing, str):xs) = do
let len = fromMaybe (toGVal (100 :: Int)) $ lookup (Just "len") xs
gfnEllipse [(Nothing, str), (Nothing, len)]
gfnEllipse xs = do
let str = fromMaybe def $ lookup (Just "str") xs
gfnEllipse $ (Nothing, str):xs
gfnJSON :: Ginger.Function (Ginger.Run p IO h)
gfnJSON ((_, x):_) =
return . toGVal . LUTF8.toString . JSON.encodePretty $ x
gfnJSON _ =
return def
gfnYAML :: Ginger.Function (Ginger.Run p IO h)
gfnYAML ((_, x):_) =
return . toGVal . UTF8.toString . YAML.encode $ x
gfnYAML _ =
return def
gfnLoadBackendData :: forall p h. HasCallStack => (LogLevel -> Text -> IO ()) -> RawBackendCache -> Ginger.Function (Ginger.Run p IO h)
gfnLoadBackendData writeLog cache args =
Ginger.dict <$> forM (zip [0..] args) loadPair
where
loadPair :: (Int, (Maybe Text, GVal (Ginger.Run p IO h)))
-> Ginger.Run p IO h (Text, GVal (Ginger.Run p IO h))
loadPair (index, (keyMay, gBackendURL)) = do
backendSpec <- either fail pure . JSON.parseEither backendSpecFromJSON . toJSON $ gBackendURL
backendData :: Items (BackendData p IO h) <- liftIO $
loadBackendData writeLog pbsInvalid cache backendSpec
return
( fromMaybe (tshow @Text index) keyMay
, toGVal backendData
)
catchToGinger :: forall h m. (LogLevel -> Text -> IO ())
-> IO (GVal m)
-> IO (GVal m)
catchToGinger writeLog action =
action
`catch` (\(e :: SomeException) -> do
writeLog Logger.Error . formatException $ e
return . toGVal $ False
)
instance ToGVal m Wai.Request where
toGVal rq =
Ginger.orderedDict
[ "httpVersion" ~> tshow @Text (Wai.httpVersion rq)
, "method" ~> decodeUtf8 @Text (Wai.requestMethod rq)
, "path" ~> decodeUtf8 @Text (Wai.rawPathInfo rq)
, "query" ~> decodeUtf8 @Text (Wai.rawQueryString rq)
, "pathInfo" ~> Wai.pathInfo rq
, ( "queryInfo"
, Ginger.orderedDict
[ (key, toGVal val)
| (key, val)
<- queryToQueryText (Wai.queryString rq)
]
)
, ( "headers"
, Ginger.orderedDict
[ (decodeCI n, toGVal $ decodeUtf8 v)
| (n, v)
<- Wai.requestHeaders rq
]
)
]
decodeCI :: CI.CI ByteString -> Text
decodeCI = decodeUtf8 . CI.original
| null | https://raw.githubusercontent.com/tdammers/sprinkles/a9161e4506427a3cf5f686654edc7ed9aa3ea82b/src/Web/Sprinkles/TemplateContext.hs | haskell | #LANGUAGE OverloadedStrings #
valid call, but category not implemented | # LANGUAGE DeriveGeneric #
# LANGUAGE NoImplicitPrelude #
# LANGUAGE OverloadedLists #
# LANGUAGE LambdaCase #
# LANGUAGE ScopedTypeVariables #
# LANGUAGE FlexibleInstances #
# LANGUAGE FlexibleContexts #
# LANGUAGE MultiParamTypeClasses #
# LANGUAGE TypeApplications #
module Web.Sprinkles.TemplateContext
where
import Web.Sprinkles.Prelude
import Text.Ginger
(parseGinger, Template, runGingerT, GingerContext, GVal(..), ToGVal(..),
(~>))
import Text.Ginger.Html
(unsafeRawHtml, html)
import qualified Text.Ginger as Ginger
import qualified Data.Yaml as YAML
import Data.Aeson (ToJSON (..), FromJSON (..))
import qualified Data.Aeson as JSON
import qualified Data.Aeson.Types as JSON
import qualified Data.Aeson.Encode.Pretty as JSON
import Data.Default (Default, def)
import Data.Text (Text)
import qualified Data.Text as Text
import System.Locale.Read (getLocale)
import qualified Text.Pandoc as Pandoc
import Text.Pandoc (PandocMonad)
import qualified Text.Pandoc.Readers.CustomCreole as PandocCreole
import qualified Data.ByteString.UTF8 as UTF8
import qualified Data.ByteString.Lazy.UTF8 as LUTF8
import Data.ByteString.Builder (stringUtf8)
import qualified Network.Wai as Wai
import qualified Data.CaseInsensitive as CI
import Network.HTTP.Types.URI (queryToQueryText)
import qualified Crypto.KDF.BCrypt as BCrypt
import Control.Monad.Except (throwError)
import System.IO.Temp
import System.Process (callProcess)
import System.Directory (doesFileExist)
import qualified Data.ByteString.Base64 as Base64
import Data.Char (isDigit)
import GHC.Stack
import Web.Sprinkles.Pandoc (pandocReaderOptions)
import Web.Sprinkles.Backends
import Web.Sprinkles.Backends.Spec (backendSpecFromJSON)
import Web.Sprinkles.Exceptions
import Web.Sprinkles.Logger as Logger
import Web.Sprinkles.Backends.Loader.Type
(RequestContext (..), pbsFromRequest, pbsInvalid)
import Web.Sprinkles.SessionHandle
import Data.RandomString (randomStr)
import Text.Printf (printf)
sprinklesGingerContext :: RawBackendCache
-> Wai.Request
-> Maybe SessionHandle
-> Logger
-> IO (HashMap Text (GVal (Ginger.Run p IO h)))
sprinklesGingerContext cache request session logger = do
csrfTokenMay <- case session of
Nothing -> return Nothing
Just handle -> sessionGet handle "csrf"
writeLog logger Debug . pack . printf "CSRF token: %s" . show $ csrfTokenMay
let csrfTokenInput = case csrfTokenMay of
Just token ->
mconcat
[ unsafeRawHtml "<input type=\"hidden\" name=\"__form_token\" value=\""
, html token
, unsafeRawHtml "\"/>"
]
Nothing ->
unsafeRawHtml "<!-- no form token defined -->"
return $ mapFromList
[ "request" ~> request
, "session" ~> session
, "formToken" ~> csrfTokenMay
, "formTokenInput" ~> csrfTokenInput
, ("load", Ginger.fromFunction (gfnLoadBackendData (writeLog logger) cache))
] <> baseGingerContext logger
baseGingerContext :: Logger
-> HashMap Text (GVal (Ginger.Run p IO h))
baseGingerContext logger =
mapFromList
[ ("ellipse", Ginger.fromFunction gfnEllipse)
, ("json", Ginger.fromFunction gfnJSON)
, ("yaml", Ginger.fromFunction gfnYAML)
, ("getlocale", Ginger.fromFunction (gfnGetLocale (writeLog logger)))
, ("pandoc", Ginger.fromFunction (gfnPandoc (writeLog logger)))
, ("markdown", Ginger.fromFunction (gfnPandocAlias "markdown" (writeLog logger)))
, ("textile", Ginger.fromFunction (gfnPandocAlias "textile" (writeLog logger)))
, ("rst", Ginger.fromFunction (gfnPandocAlias "rst" (writeLog logger)))
, ("creole", Ginger.fromFunction (gfnPandocAlias "creole" (writeLog logger)))
, ("bcrypt", gnsBCrypt)
, ("randomStr", Ginger.fromFunction gfnRandomStr)
, ("lilypond", Ginger.fromFunction (gfnLilypond (writeLog logger)))
, ("dataurl", Ginger.fromFunction (gfnDataUrl (writeLog logger)))
]
gnsBCrypt :: GVal (Ginger.Run p IO h)
gnsBCrypt =
Ginger.dict
[ ("hash", Ginger.fromFunction gfnBCryptHash)
, ("validate", Ginger.fromFunction gfnBCryptValidate)
]
gfnBCryptHash :: Ginger.Function (Ginger.Run p IO h)
gfnBCryptHash args = do
let argSpec :: [(Text, Ginger.GVal (Ginger.Run p IO h))]
argSpec = [ ("password", def)
, ("cost", toGVal (4 :: Int))
, ("algorithm", toGVal ("$2y$" :: Text))
]
case Ginger.extractArgsDefL argSpec args of
Right [passwordG, costG, algorithmG] -> do
let password = encodeUtf8 . Ginger.asText $ passwordG
algorithm = encodeUtf8 . Ginger.asText $ algorithmG
cost <- maybe
(throwM $ GingerInvalidFunctionArgs "bcrypt.hash" "int cost")
(return . ceiling)
(asNumber costG)
hash :: ByteString <- liftIO $ BCrypt.hashPassword cost password
return . toGVal . decodeUtf8 $ hash
_ -> throwM $ GingerInvalidFunctionArgs "bcrypt.hash" "string password, int cost, string algorithm"
gfnRandomStr :: Ginger.Function (Ginger.Run p IO h)
gfnRandomStr args = do
let defaultAlphabet = ['a'..'z'] ++ ['A'..'Z'] ++ ['0'..'9'] :: String
argSpec :: [(Text, Ginger.GVal (Ginger.Run p IO h))]
argSpec = [ ("length", toGVal (8 :: Int))
, ("alphabet", toGVal defaultAlphabet)
]
case Ginger.extractArgsDefL argSpec args of
Right [lengthG, alphabetG] -> do
desiredLength :: Int <- case fmap round . asNumber $ lengthG of
Nothing -> throwM $ GingerInvalidFunctionArgs "randomStr" "int length"
Just l -> return l
let alphabet :: String
alphabet = unpack . Ginger.asText $ alphabetG
when (null alphabet)
(throwM $ GingerInvalidFunctionArgs "randomStr" "alphabet too small")
liftIO $ toGVal <$> randomStr alphabet desiredLength
_ -> throwM $ GingerInvalidFunctionArgs "randomStr" "int length, string alphabet"
gfnBCryptValidate :: Ginger.Function (Ginger.Run p IO h)
gfnBCryptValidate args = do
let argSpec :: [(Text, Ginger.GVal (Ginger.Run p IO h))]
argSpec = [ ("hash", def)
, ("password", def)
]
case Ginger.extractArgsDefL argSpec args of
Right [hashG, passwordG] -> do
let hash = encodeUtf8 . Ginger.asText $ hashG
password = encodeUtf8 . Ginger.asText $ passwordG
return . toGVal $ BCrypt.validatePassword hash password
_ -> throwM $ GingerInvalidFunctionArgs "bcrypt.validate" "string password, int cost, string algorithm"
gfnLilypond :: (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnLilypond writeLog args = liftIO . catchToGinger writeLog $ do
case Ginger.extractArgsDefL [("src", ""), ("dpi", "144"), ("width", "120mm"), ("inline", "true"), ("raw", "")] args of
Right [srcG, dpiG, widthG, inlineG, rawG] -> do
let dpi = fromMaybe 0 $ Ginger.asNumber dpiG
width = parseWidth . Ginger.asText $ widthG
inline = Ginger.asBoolean inlineG
raw = Ginger.asBoolean rawG
parseWidth xs =
let numeric = takeWhile isDigit xs
unit = dropWhile isDigit xs
in numeric <> "\\" <> unit
let rawSrc = Ginger.asText srcG
src = if inline then
"\\paper {\n" <>
" indent=0\\mm\n" <>
" line-width=" <> width <> "\n" <>
" oddFooterMarkup=##f\n" <>
" oddHeaderMarkup=##f\n" <>
" bookTitleMarkup = ##f\n" <>
" scoreTitleMarkup = ##f\n" <>
"}\n" <>
rawSrc
else
rawSrc
dir <- getCanonicalTemporaryDirectory
let hash = sha1 (encodeUtf8 . fromStrict $ src <> Ginger.asText dpiG)
let rawFilename = dir </> hash
lyFilename = rawFilename <.> "ly"
pngFilename = rawFilename <.> "png"
doesFileExist pngFilename >>= flip unless (do
writeFile lyFilename src
callProcess "lilypond"
[ "--png", "-dsafe"
, "-dbackend=eps"
, "-dno-gs-load-fonts"
, "-dinclude-eps-fonts"
, "-dpixmap-format=pngalpha"
, "-dresolution=" <> show dpi
, "-o", rawFilename
, lyFilename
]
)
png <- readFile pngFilename
if raw then
return . toGVal $ png
else do
let dataUrl = decodeUtf8 $ "data:image/png;base64," <> Base64.encode png
return . toGVal $ dataUrl
s ->
throwM $ GingerInvalidFunctionArgs "lilypond" "string src, int dpi=144, string width='120mm'"
gfnDataUrl :: forall p h. (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnDataUrl writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("body", ""), ("type", def)] args of
Right [bodyG, contentTypeG] -> do
let read = Ginger.asFunction =<<
Ginger.lookupKey "read" =<<
(Ginger.lookupKey "bytes" bodyG)
let bodyBytes = fromMaybe "" $ Ginger.asBytes bodyG
let contentType =
if Ginger.isNull contentTypeG then
"application/octet-stream"
else
Ginger.asText contentTypeG
return . toGVal $
"data:" <>
contentType <>
";base64," <>
(decodeUtf8 . Base64.encode $ bodyBytes)
_ ->
throwM $ GingerInvalidFunctionArgs "dataurl" "string body, string type"
gfnPandoc :: forall p h. (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnPandoc writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("src", ""), ("reader", "markdown")] args of
Right [src, readerName] -> do
toGVal <$> pandoc (Ginger.asText readerName) (Ginger.asText src)
_ ->
throwM $ GingerInvalidFunctionArgs "pandoc" "string src, string reader"
gfnPandocAlias :: forall p h. Text -> (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnPandocAlias readerName writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("src", "")] args of
Right [src] ->
toGVal <$> pandoc readerName (Ginger.asText src)
_ ->
throwM $ GingerInvalidFunctionArgs readerName "string src"
pandoc :: Text -> Text -> IO Pandoc.Pandoc
pandoc readerName src = do
reader <- either
(\err -> fail $ "Invalid reader: " ++ show err)
return
(Pandoc.runPure $ getReader readerName)
let read = case reader of
Pandoc.TextReader r ->
r pandocReaderOptions
Pandoc.ByteStringReader r ->
r pandocReaderOptions . encodeUtf8 . fromStrict
(pure . Pandoc.runPure . read $ src) >>= either
(\err -> fail $ "Reading " ++ show readerName ++ " failed: " ++ show err)
return
where
getReader :: Text -> Pandoc.PandocPure (Pandoc.Reader Pandoc.PandocPure)
getReader "creole-tdammers" = fst <$> customCreoleReader
getReader readerName = fst <$> Pandoc.getReader readerName
customCreoleReader :: forall m. PandocMonad m => m (Pandoc.Reader m, Pandoc.Extensions)
customCreoleReader =
return (Pandoc.TextReader $ reader, Pandoc.emptyExtensions)
where
reader :: Pandoc.ReaderOptions -> Text -> m Pandoc.Pandoc
reader opts src =
either throwError return $
PandocCreole.readCustomCreole opts (unpack src)
gfnGetLocale :: forall p h. (LogLevel -> Text -> IO ()) -> Ginger.Function (Ginger.Run p IO h)
gfnGetLocale writeLog args = liftIO . catchToGinger writeLog $
case Ginger.extractArgsDefL [("category", "LC_TIME"), ("locale", "")] args of
Right [gCat, gName] ->
case (Ginger.asText gCat, Text.unpack . Ginger.asText $ gName) of
("LC_TIME", "") -> toGVal <$> getLocale Nothing
("LC_TIME", localeName) -> toGVal <$> getLocale (Just localeName)
_ -> throwM $ GingerInvalidFunctionArgs "getlocale" "string category, string name"
gfnEllipse :: Ginger.Function (Ginger.Run p IO h)
gfnEllipse [] = return def
gfnEllipse [(Nothing, str)] =
gfnEllipse [(Nothing, str), (Nothing, toGVal (100 :: Int))]
gfnEllipse [(Nothing, str), (Nothing, len)] = do
let txt = Ginger.asText str
actualLen = Web.Sprinkles.Prelude.length txt
targetLen = fromMaybe 100 $ ceiling <$> Ginger.asNumber len
txt' = if actualLen + 3 > targetLen
then take (targetLen - 3) txt <> "..."
else txt
return . toGVal $ txt'
gfnEllipse ((Nothing, str):xs) = do
let len = fromMaybe (toGVal (100 :: Int)) $ lookup (Just "len") xs
gfnEllipse [(Nothing, str), (Nothing, len)]
gfnEllipse xs = do
let str = fromMaybe def $ lookup (Just "str") xs
gfnEllipse $ (Nothing, str):xs
gfnJSON :: Ginger.Function (Ginger.Run p IO h)
gfnJSON ((_, x):_) =
return . toGVal . LUTF8.toString . JSON.encodePretty $ x
gfnJSON _ =
return def
gfnYAML :: Ginger.Function (Ginger.Run p IO h)
gfnYAML ((_, x):_) =
return . toGVal . UTF8.toString . YAML.encode $ x
gfnYAML _ =
return def
gfnLoadBackendData :: forall p h. HasCallStack => (LogLevel -> Text -> IO ()) -> RawBackendCache -> Ginger.Function (Ginger.Run p IO h)
gfnLoadBackendData writeLog cache args =
Ginger.dict <$> forM (zip [0..] args) loadPair
where
loadPair :: (Int, (Maybe Text, GVal (Ginger.Run p IO h)))
-> Ginger.Run p IO h (Text, GVal (Ginger.Run p IO h))
loadPair (index, (keyMay, gBackendURL)) = do
backendSpec <- either fail pure . JSON.parseEither backendSpecFromJSON . toJSON $ gBackendURL
backendData :: Items (BackendData p IO h) <- liftIO $
loadBackendData writeLog pbsInvalid cache backendSpec
return
( fromMaybe (tshow @Text index) keyMay
, toGVal backendData
)
catchToGinger :: forall h m. (LogLevel -> Text -> IO ())
-> IO (GVal m)
-> IO (GVal m)
catchToGinger writeLog action =
action
`catch` (\(e :: SomeException) -> do
writeLog Logger.Error . formatException $ e
return . toGVal $ False
)
instance ToGVal m Wai.Request where
toGVal rq =
Ginger.orderedDict
[ "httpVersion" ~> tshow @Text (Wai.httpVersion rq)
, "method" ~> decodeUtf8 @Text (Wai.requestMethod rq)
, "path" ~> decodeUtf8 @Text (Wai.rawPathInfo rq)
, "query" ~> decodeUtf8 @Text (Wai.rawQueryString rq)
, "pathInfo" ~> Wai.pathInfo rq
, ( "queryInfo"
, Ginger.orderedDict
[ (key, toGVal val)
| (key, val)
<- queryToQueryText (Wai.queryString rq)
]
)
, ( "headers"
, Ginger.orderedDict
[ (decodeCI n, toGVal $ decodeUtf8 v)
| (n, v)
<- Wai.requestHeaders rq
]
)
]
decodeCI :: CI.CI ByteString -> Text
decodeCI = decodeUtf8 . CI.original
|
b54484f0c7b796104295eed5c9ca0c911bf7ecb0ff88f4d42db0f381df86e0c2 | bollu/koans | observer-pattern.hs | {-# LANGUAGE ExplicitForAll #-}
{-# LANGUAGE Rank2Types #-}
# LANGUAGE TypeFamilies #
{-# LANGUAGE GADTs #-}
import Control.Applicative
import Data.Traversable
data Observer m e where
MkObserver :: (e -> Maybe a) -> (a -> m ()) -> Observer m e
data Event = KeyEvent Char |
MouseEvent (Int, Int)
data Dispatcher m e = Dispatcher {
observers :: [Observer m e]
}
runEvent :: Applicative m => Dispatcher m e -> e -> m ()
runEvent d e = (for (observers d) run) *> pure () where
run (MkObserver handler action) =
case handler e of
Just (a) -> action a
Nothing -> pure ()
| null | https://raw.githubusercontent.com/bollu/koans/0204e9bb5ef9c541fe161523acac3cacae5d07fe/observer-pattern.hs | haskell | # LANGUAGE ExplicitForAll #
# LANGUAGE Rank2Types #
# LANGUAGE GADTs # | # LANGUAGE TypeFamilies #
import Control.Applicative
import Data.Traversable
data Observer m e where
MkObserver :: (e -> Maybe a) -> (a -> m ()) -> Observer m e
data Event = KeyEvent Char |
MouseEvent (Int, Int)
data Dispatcher m e = Dispatcher {
observers :: [Observer m e]
}
runEvent :: Applicative m => Dispatcher m e -> e -> m ()
runEvent d e = (for (observers d) run) *> pure () where
run (MkObserver handler action) =
case handler e of
Just (a) -> action a
Nothing -> pure ()
|
3277445e67a5cd108bd627ac9d4d01fcb3213ed94353cf640649127df3f05923 | csabahruska/jhc-components | Desugar.hs | -- various desugaring routines
--
-- The general desugaring routine creates selectors for data
-- constructors with named fields, changes all pattern bindings
-- into 'simple' pattern bindings, and adds failure cases to lambda
-- expressions which have failable patterns
module FrontEnd.Desugar (desugarHsModule, desugarHsStmt) where
import FrontEnd.HsSyn
import FrontEnd.SrcLoc
import FrontEnd.Syn.Traverse
import Name.Names
import Ty.Level
import Util.Std
import Util.UniqueMonad
type PatSM = Uniq
instance MonadSrcLoc PatSM where
instance MonadSetSrcLoc PatSM where
withSrcLoc' _ a = a
-- a new (unique) name introduced in pattern selector functions
newPatVarName :: HsName
newPatVarName = toName Val ("pv@"::String)
desugarHsModule :: HsModule -> HsModule
desugarHsModule m = hsModuleDecls_s ds' m where
(ds', _) = runUniq 0 (dsm (hsModuleDecls m)) -- (0::Int)
dsm ds = fmap concat $ mapM desugarDecl ds
desugarHsStmt :: Monad m => HsStmt -> m HsStmt
desugarHsStmt s = return $ fst $ runUniq 0 (desugarStmt s)
desugarDecl :: HsDecl -> PatSM [HsDecl]
desugarDecl (HsFunBind matches) = do
newMatches <- mapM desugarMatch matches
return [HsFunBind newMatches]
--desugarDecl pb@(HsPatBind sloc p rhs wheres) = do
-- newRhs <- desugarRhs rhs
desugarDecl where s
-- return [HsPatBind sloc p newRhs (concat newWheres)]
--variable pattern bindings remain unchanged
desugarDecl HsPatBind { hsDeclPat = hsDeclPat@HsPVar {}, .. } = do
hsDeclRhs <- desugarRhs hsDeclRhs
hsDeclDecls <- concat <$> mapM desugarDecl hsDeclDecls
return [HsPatBind { .. }]
--desugarDecl (HsPatBind sloc pat rhs wheres) = do
desugarDecl HsPatBind { .. } = do
hsDeclRhs <- desugarRhs hsDeclRhs
hsDeclDecls <- concat <$> mapM desugarDecl hsDeclDecls
unique <- newUniq
let newRhsName = toName Val ("rhs@" ++ show unique)
let newBinds = genBindsForPat hsDeclPat hsDeclSrcLoc newRhsName
newBinds <- concat <$> mapM desugarDecl newBinds
let newTopDeclForRhs = HsPatBind { hsDeclPat = HsPVar newRhsName, .. }
return (newTopDeclForRhs : newBinds)
desugarDecl (HsClassDecl sloc qualtype decls) = do
newDecls <- mapM desugarDecl decls
return [HsClassDecl sloc qualtype (concat newDecls)]
desugarDecl (HsInstDecl sloc qualtype decls) = do
newDecls <- mapM desugarDecl decls
return [HsInstDecl sloc qualtype (concat newDecls)]
-- XXX we currently discard instance specializations
desugarDecl HsPragmaSpecialize { hsDeclName = n } | n == u_instance = return []
desugarDecl anyOtherDecl = return [anyOtherDecl]
desugarMatch :: (HsMatch) -> PatSM (HsMatch)
desugarMatch (HsMatch sloc funName pats rhs wheres) = do
newWheres <- mapM desugarDecl wheres
newRhs <- desugarRhs rhs
return (HsMatch sloc funName pats newRhs (concat newWheres))
-- generate the pattern bindings for each variable in a pattern
genBindsForPat :: HsPat -> SrcLoc -> HsName -> [HsDecl]
genBindsForPat pat sloc rhs = ans where
ans = [HsPatBind sloc (HsPVar pn) (HsUnGuardedRhs selector) [] | (pn, selector) <- selFuns]
selFuns = getPatSelFuns sloc pat rhs
-- generate selector functions for each of the variables that
-- are bound in a pattern
getPatSelFuns :: SrcLoc -> HsPat -> Name -> [(Name, HsExp)]
getPatSelFuns sloc pat rhsvar = ans where
ans = [ ( v , ( HsLambda sloc [ HsPVar newPatVarName ] ( kase ( replaceVarNamesInPat v pat ) ) ) ) | v < - getNamesFromHsPat pat , nameType v = = Val ]
ans = [(v, kase (replaceVarNamesInPat v pat)) | v <- getNamesFromHsPat pat, nameType v == Val]
kase p = HsCase (HsVar rhsvar) [a1, a2 ] where
a1 = HsAlt sloc p (HsUnGuardedRhs (HsVar newPatVarName)) []
a2 = HsAlt sloc HsPWildCard (HsUnGuardedRhs (HsError { hsExpSrcLoc = sloc, hsExpErrorType = HsErrorPatternFailure, hsExpString = show sloc ++ " failed pattern match" })) []
a2 = HsAlt sloc HsPWildCard ( HsUnGuardedRhs ( HsApp ( HsVar ( toName ( " error"::String ) ) ) ( HsLit $ HsString $ show sloc + + " failed pattern match " ) ) ) [ ]
-- replaces all occurrences of a name with a new variable
-- and every other name with underscore
replaceVarNamesInPat :: HsName -> HsPat -> HsPat
replaceVarNamesInPat name p = f p where
f (HsPVar name2)
| name == name2 = HsPVar newPatVarName
| getTyLevel name2 == Just termLevel = HsPWildCard
f (HsPAsPat asName pat)
| name == asName = HsPAsPat newPatVarName (f pat)
| getTyLevel asName == Just termLevel = f pat
f p = runIdentity $ traverseHsPat (return . f) p
-- f name p = error $ "replaceVarNamesInPat: " ++ show (name,p)
desugarRhs :: HsRhs -> PatSM HsRhs
desugarRhs = traverseHsExp desugarExp
desugarExp :: HsExp -> PatSM HsExp
desugarExp (HsLambda sloc pats e)
| all isSimplePat pats = do
newE <- desugarExp e
return (HsLambda sloc pats newE)
desugarExp (HsLambda sloc pats e) = do
ps <- mapM f pats
let (xs,zs) = unzip ps
e' <- (ne e $ concat zs)
return (HsLambda sloc (map HsPVar xs) e')
where
ne e [] = desugarExp e
ne e ((n,p):zs) = do
e' <- ne e zs
let a1 = HsAlt sloc p (HsUnGuardedRhs e') []
a2 = HsAlt sloc HsPWildCard (HsUnGuardedRhs (HsError { hsExpSrcLoc = sloc, hsExpErrorType = HsErrorPatternFailure, hsExpString = show sloc ++ " failed pattern match in lambda" })) []
return $ HsCase (HsVar n) [a1, a2 ]
f (HsPVar x) = return (x,[])
f (HsPAsPat n p) = return (n,[(n,p)])
f p = do
unique <- newUniq
let n = toName Val ("lambind@" ++ show unique)
return (n,[(n,p)])
desugarExp (HsLet decls e) = do
newDecls <- mapM desugarDecl decls
HsLet (concat newDecls) <$> desugarExp e
desugarExp (HsCase e alts) = do
newE <- desugarExp e
newAlts <- mapM desugarAlt alts
return (HsCase newE newAlts)
desugarExp (HsDo stmts) = HsDo `liftM` mapM desugarStmt stmts
desugarExp e = traverseHsExp desugarExp e
desugarAlt :: (HsAlt) -> PatSM (HsAlt)
desugarAlt (HsAlt sloc pat gAlts wheres) = do
newGAlts <- desugarRhs gAlts
newWheres <- mapM desugarDecl wheres
return (HsAlt sloc pat newGAlts (concat newWheres))
desugarStmt :: (HsStmt) -> PatSM (HsStmt)
desugarStmt (HsLetStmt decls) = do
newDecls <- mapM desugarDecl decls
return (HsLetStmt $ concat newDecls)
desugarStmt (HsGenerator srcLoc pat e) = HsGenerator srcLoc pat <$> desugarExp e
desugarStmt (HsQualifier e) = HsQualifier <$> desugarExp e
isSimplePat p = f (openPat p) where
f HsPVar {} = True
f HsPWildCard = True
f _ = False
openPat (HsPParen p) = openPat p
openPat (HsPNeg p) = openPat p
openPat (HsPAsPat _ p) = openPat p
openPat (HsPTypeSig _ p _) = openPat p
openPat (HsPInfixApp a n b) = HsPApp n [a,b]
openPat p = p
| null | https://raw.githubusercontent.com/csabahruska/jhc-components/a7dace481d017f5a83fbfc062bdd2d099133adf1/jhc-frontend/src/FrontEnd/Desugar.hs | haskell | various desugaring routines
The general desugaring routine creates selectors for data
constructors with named fields, changes all pattern bindings
into 'simple' pattern bindings, and adds failure cases to lambda
expressions which have failable patterns
a new (unique) name introduced in pattern selector functions
(0::Int)
desugarDecl pb@(HsPatBind sloc p rhs wheres) = do
newRhs <- desugarRhs rhs
return [HsPatBind sloc p newRhs (concat newWheres)]
variable pattern bindings remain unchanged
desugarDecl (HsPatBind sloc pat rhs wheres) = do
XXX we currently discard instance specializations
generate the pattern bindings for each variable in a pattern
generate selector functions for each of the variables that
are bound in a pattern
replaces all occurrences of a name with a new variable
and every other name with underscore
f name p = error $ "replaceVarNamesInPat: " ++ show (name,p) |
module FrontEnd.Desugar (desugarHsModule, desugarHsStmt) where
import FrontEnd.HsSyn
import FrontEnd.SrcLoc
import FrontEnd.Syn.Traverse
import Name.Names
import Ty.Level
import Util.Std
import Util.UniqueMonad
type PatSM = Uniq
instance MonadSrcLoc PatSM where
instance MonadSetSrcLoc PatSM where
withSrcLoc' _ a = a
newPatVarName :: HsName
newPatVarName = toName Val ("pv@"::String)
desugarHsModule :: HsModule -> HsModule
desugarHsModule m = hsModuleDecls_s ds' m where
dsm ds = fmap concat $ mapM desugarDecl ds
desugarHsStmt :: Monad m => HsStmt -> m HsStmt
desugarHsStmt s = return $ fst $ runUniq 0 (desugarStmt s)
desugarDecl :: HsDecl -> PatSM [HsDecl]
desugarDecl (HsFunBind matches) = do
newMatches <- mapM desugarMatch matches
return [HsFunBind newMatches]
desugarDecl where s
desugarDecl HsPatBind { hsDeclPat = hsDeclPat@HsPVar {}, .. } = do
hsDeclRhs <- desugarRhs hsDeclRhs
hsDeclDecls <- concat <$> mapM desugarDecl hsDeclDecls
return [HsPatBind { .. }]
desugarDecl HsPatBind { .. } = do
hsDeclRhs <- desugarRhs hsDeclRhs
hsDeclDecls <- concat <$> mapM desugarDecl hsDeclDecls
unique <- newUniq
let newRhsName = toName Val ("rhs@" ++ show unique)
let newBinds = genBindsForPat hsDeclPat hsDeclSrcLoc newRhsName
newBinds <- concat <$> mapM desugarDecl newBinds
let newTopDeclForRhs = HsPatBind { hsDeclPat = HsPVar newRhsName, .. }
return (newTopDeclForRhs : newBinds)
desugarDecl (HsClassDecl sloc qualtype decls) = do
newDecls <- mapM desugarDecl decls
return [HsClassDecl sloc qualtype (concat newDecls)]
desugarDecl (HsInstDecl sloc qualtype decls) = do
newDecls <- mapM desugarDecl decls
return [HsInstDecl sloc qualtype (concat newDecls)]
desugarDecl HsPragmaSpecialize { hsDeclName = n } | n == u_instance = return []
desugarDecl anyOtherDecl = return [anyOtherDecl]
desugarMatch :: (HsMatch) -> PatSM (HsMatch)
desugarMatch (HsMatch sloc funName pats rhs wheres) = do
newWheres <- mapM desugarDecl wheres
newRhs <- desugarRhs rhs
return (HsMatch sloc funName pats newRhs (concat newWheres))
genBindsForPat :: HsPat -> SrcLoc -> HsName -> [HsDecl]
genBindsForPat pat sloc rhs = ans where
ans = [HsPatBind sloc (HsPVar pn) (HsUnGuardedRhs selector) [] | (pn, selector) <- selFuns]
selFuns = getPatSelFuns sloc pat rhs
getPatSelFuns :: SrcLoc -> HsPat -> Name -> [(Name, HsExp)]
getPatSelFuns sloc pat rhsvar = ans where
ans = [ ( v , ( HsLambda sloc [ HsPVar newPatVarName ] ( kase ( replaceVarNamesInPat v pat ) ) ) ) | v < - getNamesFromHsPat pat , nameType v = = Val ]
ans = [(v, kase (replaceVarNamesInPat v pat)) | v <- getNamesFromHsPat pat, nameType v == Val]
kase p = HsCase (HsVar rhsvar) [a1, a2 ] where
a1 = HsAlt sloc p (HsUnGuardedRhs (HsVar newPatVarName)) []
a2 = HsAlt sloc HsPWildCard (HsUnGuardedRhs (HsError { hsExpSrcLoc = sloc, hsExpErrorType = HsErrorPatternFailure, hsExpString = show sloc ++ " failed pattern match" })) []
a2 = HsAlt sloc HsPWildCard ( HsUnGuardedRhs ( HsApp ( HsVar ( toName ( " error"::String ) ) ) ( HsLit $ HsString $ show sloc + + " failed pattern match " ) ) ) [ ]
replaceVarNamesInPat :: HsName -> HsPat -> HsPat
replaceVarNamesInPat name p = f p where
f (HsPVar name2)
| name == name2 = HsPVar newPatVarName
| getTyLevel name2 == Just termLevel = HsPWildCard
f (HsPAsPat asName pat)
| name == asName = HsPAsPat newPatVarName (f pat)
| getTyLevel asName == Just termLevel = f pat
f p = runIdentity $ traverseHsPat (return . f) p
desugarRhs :: HsRhs -> PatSM HsRhs
desugarRhs = traverseHsExp desugarExp
desugarExp :: HsExp -> PatSM HsExp
desugarExp (HsLambda sloc pats e)
| all isSimplePat pats = do
newE <- desugarExp e
return (HsLambda sloc pats newE)
desugarExp (HsLambda sloc pats e) = do
ps <- mapM f pats
let (xs,zs) = unzip ps
e' <- (ne e $ concat zs)
return (HsLambda sloc (map HsPVar xs) e')
where
ne e [] = desugarExp e
ne e ((n,p):zs) = do
e' <- ne e zs
let a1 = HsAlt sloc p (HsUnGuardedRhs e') []
a2 = HsAlt sloc HsPWildCard (HsUnGuardedRhs (HsError { hsExpSrcLoc = sloc, hsExpErrorType = HsErrorPatternFailure, hsExpString = show sloc ++ " failed pattern match in lambda" })) []
return $ HsCase (HsVar n) [a1, a2 ]
f (HsPVar x) = return (x,[])
f (HsPAsPat n p) = return (n,[(n,p)])
f p = do
unique <- newUniq
let n = toName Val ("lambind@" ++ show unique)
return (n,[(n,p)])
desugarExp (HsLet decls e) = do
newDecls <- mapM desugarDecl decls
HsLet (concat newDecls) <$> desugarExp e
desugarExp (HsCase e alts) = do
newE <- desugarExp e
newAlts <- mapM desugarAlt alts
return (HsCase newE newAlts)
desugarExp (HsDo stmts) = HsDo `liftM` mapM desugarStmt stmts
desugarExp e = traverseHsExp desugarExp e
desugarAlt :: (HsAlt) -> PatSM (HsAlt)
desugarAlt (HsAlt sloc pat gAlts wheres) = do
newGAlts <- desugarRhs gAlts
newWheres <- mapM desugarDecl wheres
return (HsAlt sloc pat newGAlts (concat newWheres))
desugarStmt :: (HsStmt) -> PatSM (HsStmt)
desugarStmt (HsLetStmt decls) = do
newDecls <- mapM desugarDecl decls
return (HsLetStmt $ concat newDecls)
desugarStmt (HsGenerator srcLoc pat e) = HsGenerator srcLoc pat <$> desugarExp e
desugarStmt (HsQualifier e) = HsQualifier <$> desugarExp e
isSimplePat p = f (openPat p) where
f HsPVar {} = True
f HsPWildCard = True
f _ = False
openPat (HsPParen p) = openPat p
openPat (HsPNeg p) = openPat p
openPat (HsPAsPat _ p) = openPat p
openPat (HsPTypeSig _ p _) = openPat p
openPat (HsPInfixApp a n b) = HsPApp n [a,b]
openPat p = p
|
cf5c0f8f9215847b9cb374bf062a3e297399cabd8e22831eca5497a19775ba57 | yutopp/rill | sema_type.ml |
* Copyright yutopp 2017 - .
*
* Distributed under the Boost Software License , Version 1.0 .
* ( See accompanying file LICENSE_1_0.txt or copy at
* )
* Copyright yutopp 2017 - .
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* )
*)
let create_new_lt_var ?(tmp=false) lt_name parent_env =
let ctx_env = parent_env.Env.context_env in
let var_id = Lifetime.Var_id.generate () in
let lt = match tmp with
false -> Lifetime.LtVar (var_id, lt_name, ctx_env.Env.env_id, Int32.of_int 0, [var_id], Loc.dummy)
| true -> Lifetime.LtVarPlaceholder (var_id, Loc.dummy)
in
lt
let make_class_type ?(new_instance=false) cenv attr env ctx =
let open Sema_context in
let open Type_sets in
let {
Type_attr.ta_ref_val = rv;
Type_attr.ta_mut = mut;
} = attr
in
let ts = Type_info.UniqueTy cenv in
let cr = Env.ClassOp.get_record cenv in
let template_args = cr.Env.cls_template_vals in
let aux_generics_args = match rv with
| Type_attr.Ref [] -> [create_new_lt_var ~tmp:new_instance (Id_string.Pure "`_") env]
| Type_attr.Val -> []
| _ -> failwith "[ICE]"
in
let generics_args =
match new_instance with
| true ->
let gen_same_name_lt_var lt =
match lt with
| Lifetime.LtVar (_, spec, _, _, _, _) ->
create_new_lt_var ~tmp:true spec cenv
| _ -> failwith ""
in
cr.Env.cls_generics_vals |> List.map gen_same_name_lt_var
| false ->
cr.Env.cls_generics_vals
in
Type.Generator.generate_type ~aux_generics_args:aux_generics_args
ctx.sc_tsets.ts_type_gen
ts template_args generics_args attr
(*
* exclude parameters which are not required to call.
* (Ex. has a default value)
* Currently, it is not supported.
*)
let exclude_optional_params param_kinds =
let rec exclude_optional_params' param_kinds acc =
match param_kinds with
| [] -> acc
| (k :: ks) ->
begin
match k with
| Env.FnParamKindType ty ->
exclude_optional_params' ks (ty :: acc)
end
in
exclude_optional_params' param_kinds []
|> List.rev
| null | https://raw.githubusercontent.com/yutopp/rill/375b67c03ab2087d0a2a833bd9e80f3e51e2694f/rillc/_migrating/sema_type.ml | ocaml |
* exclude parameters which are not required to call.
* (Ex. has a default value)
* Currently, it is not supported.
|
* Copyright yutopp 2017 - .
*
* Distributed under the Boost Software License , Version 1.0 .
* ( See accompanying file LICENSE_1_0.txt or copy at
* )
* Copyright yutopp 2017 - .
*
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* )
*)
let create_new_lt_var ?(tmp=false) lt_name parent_env =
let ctx_env = parent_env.Env.context_env in
let var_id = Lifetime.Var_id.generate () in
let lt = match tmp with
false -> Lifetime.LtVar (var_id, lt_name, ctx_env.Env.env_id, Int32.of_int 0, [var_id], Loc.dummy)
| true -> Lifetime.LtVarPlaceholder (var_id, Loc.dummy)
in
lt
let make_class_type ?(new_instance=false) cenv attr env ctx =
let open Sema_context in
let open Type_sets in
let {
Type_attr.ta_ref_val = rv;
Type_attr.ta_mut = mut;
} = attr
in
let ts = Type_info.UniqueTy cenv in
let cr = Env.ClassOp.get_record cenv in
let template_args = cr.Env.cls_template_vals in
let aux_generics_args = match rv with
| Type_attr.Ref [] -> [create_new_lt_var ~tmp:new_instance (Id_string.Pure "`_") env]
| Type_attr.Val -> []
| _ -> failwith "[ICE]"
in
let generics_args =
match new_instance with
| true ->
let gen_same_name_lt_var lt =
match lt with
| Lifetime.LtVar (_, spec, _, _, _, _) ->
create_new_lt_var ~tmp:true spec cenv
| _ -> failwith ""
in
cr.Env.cls_generics_vals |> List.map gen_same_name_lt_var
| false ->
cr.Env.cls_generics_vals
in
Type.Generator.generate_type ~aux_generics_args:aux_generics_args
ctx.sc_tsets.ts_type_gen
ts template_args generics_args attr
let exclude_optional_params param_kinds =
let rec exclude_optional_params' param_kinds acc =
match param_kinds with
| [] -> acc
| (k :: ks) ->
begin
match k with
| Env.FnParamKindType ty ->
exclude_optional_params' ks (ty :: acc)
end
in
exclude_optional_params' param_kinds []
|> List.rev
|
8f4636417d0af3baf97d0d71ddfc0313294edebe6ee77fe4ca8a43408481b9cf | erikd/system-linux-proc | test-io.hs |
import Hedgehog.Main (defaultMain)
import qualified Test.System.Linux.Proc
main :: IO ()
main =
defaultMain
[ Test.System.Linux.Proc.tests
]
| null | https://raw.githubusercontent.com/erikd/system-linux-proc/b5fc50b0e9f2b28a92c3152908799e53ae16589e/test/test-io.hs | haskell |
import Hedgehog.Main (defaultMain)
import qualified Test.System.Linux.Proc
main :: IO ()
main =
defaultMain
[ Test.System.Linux.Proc.tests
]
|
|
4dfb25a455c2f537821a1ade0d76008dce7b76a61f546e151263e74c3132b3da | cgrand/regex | charset.clj | (ns net.cgrand.regex.charset
(:refer-clojure :exclude [complement * + - not contains?]))
(defprotocol Rangeable
(ranges [cs]))
(defprotocol Charsetable
(charset [x]))
(extend-protocol Rangeable
String
(ranges [s] (map vector s s))
Character
(ranges [c] [[c c]])
java.lang.Number
(ranges [n] [[n n]])
clojure.lang.APersistentMap
(ranges [m] (seq m))
clojure.lang.APersistentSet
(ranges [s] (map (fn [x] [x x]) s))
nil
(ranges [_] nil))
(defrecord Charset [cs]
Rangeable
(ranges [_] (seq cs))
Charsetable
(charset [this] this))
(defn lt [[a b] [c d]]
(boolean (and b c (< (int b) (int c)))))
(def no-char (Charset. (sorted-set-by lt)))
(extend-protocol Charsetable
nil
(charset [_]
no-char))
(defn- pred [c]
(when (and c (pos? (int c))) (char (dec (int c)))))
(defn- succ [c]
(when (and c (< (int c) 0xFFFF)) (char (inc (int c)))))
(defn- split
"Splits ranges right after x."
[cs x]
(if-let [[a b :as r] (when x (get cs [x x]))]
(if (or (= b x) (and b x (= (int b) (int x))))
cs
(-> cs (disj r) (conj [a x] [(succ x) b])))
cs))
(defn- between [rs a b]
(cond
(and a b) (subseq rs >= [a a] <= [b b])
a (subseq rs >= [a a])
b (subseq rs <= [b b])
:else (seq rs)))
(defn- subtract [cs [a b]]
(let [rs (-> cs :cs (split (pred a)) (split b))]
(Charset. (reduce disj rs (between rs a b)))))
(defn- add [cs [a b]]
(let [rs (:cs cs)
aa (pred a)
bb (succ b)
a (when a (first (get rs [aa aa] [a a])))
b (when b (second (get rs [bb bb] [b b])))]
(Charset. (conj (reduce disj rs (between rs a b))
[a b]))))
(def any-char (add no-char [nil nil]))
(extend-protocol Charsetable
Object
(charset [x]
(reduce add no-char (ranges x))))
(defn + "union"
([] no-char)
([a] (charset a))
([a b]
(reduce add (charset a) (ranges b)))
([a b & cs]
(reduce + (+ a b) cs)))
(defn - "complement or asymetric difference"
([x] (reduce subtract any-char (ranges x)))
([x & xs]
(reduce #(reduce subtract %1 (ranges %2)) x xs)))
(defn * "intersection"
([] any-char)
([a] (charset a))
([a b]
(- (+ (- a) (- b))))
([a b & cs]
(- (reduce + (+ (- a) (- b)) (map - cs)))))
(defn not [& xs]
(- (reduce + xs)))
(defn pick
"Returns a character contained in the charset or nil if the
charset is empty."
[cs]
(when-let [[a b] (first (ranges cs))]
(or a \u0000)))
(defn has? [cs c]
(boolean ((:cs (charset cs)) [c c])))
(defn disjunctive-union
"as and bs are collection of disjunct charsets, returns their union as a
collection of smaller disjunct charsets."
([] nil)
([as] as)
([as bs]
(let [A (reduce + as)
B (reduce + bs)]
(filter pick
(concat
(map #(- % B) as)
(map #(- % A) bs)
(for [a as b bs] (* a b)))))))
(defn disjunctive-intersection
"as and bs are collection of disjunct charsets, returns their intersection
as a collection of smaller disjunct charsets."
([] [any-char])
([as] as)
([as bs]
(filter pick
(for [a as b bs] (* a b)))))
| null | https://raw.githubusercontent.com/cgrand/regex/200ce13e07b7c4da67b2d2dafc9f708cffeec66c/src/net/cgrand/regex/charset.clj | clojure | (ns net.cgrand.regex.charset
(:refer-clojure :exclude [complement * + - not contains?]))
(defprotocol Rangeable
(ranges [cs]))
(defprotocol Charsetable
(charset [x]))
(extend-protocol Rangeable
String
(ranges [s] (map vector s s))
Character
(ranges [c] [[c c]])
java.lang.Number
(ranges [n] [[n n]])
clojure.lang.APersistentMap
(ranges [m] (seq m))
clojure.lang.APersistentSet
(ranges [s] (map (fn [x] [x x]) s))
nil
(ranges [_] nil))
(defrecord Charset [cs]
Rangeable
(ranges [_] (seq cs))
Charsetable
(charset [this] this))
(defn lt [[a b] [c d]]
(boolean (and b c (< (int b) (int c)))))
(def no-char (Charset. (sorted-set-by lt)))
(extend-protocol Charsetable
nil
(charset [_]
no-char))
(defn- pred [c]
(when (and c (pos? (int c))) (char (dec (int c)))))
(defn- succ [c]
(when (and c (< (int c) 0xFFFF)) (char (inc (int c)))))
(defn- split
"Splits ranges right after x."
[cs x]
(if-let [[a b :as r] (when x (get cs [x x]))]
(if (or (= b x) (and b x (= (int b) (int x))))
cs
(-> cs (disj r) (conj [a x] [(succ x) b])))
cs))
(defn- between [rs a b]
(cond
(and a b) (subseq rs >= [a a] <= [b b])
a (subseq rs >= [a a])
b (subseq rs <= [b b])
:else (seq rs)))
(defn- subtract [cs [a b]]
(let [rs (-> cs :cs (split (pred a)) (split b))]
(Charset. (reduce disj rs (between rs a b)))))
(defn- add [cs [a b]]
(let [rs (:cs cs)
aa (pred a)
bb (succ b)
a (when a (first (get rs [aa aa] [a a])))
b (when b (second (get rs [bb bb] [b b])))]
(Charset. (conj (reduce disj rs (between rs a b))
[a b]))))
(def any-char (add no-char [nil nil]))
(extend-protocol Charsetable
Object
(charset [x]
(reduce add no-char (ranges x))))
(defn + "union"
([] no-char)
([a] (charset a))
([a b]
(reduce add (charset a) (ranges b)))
([a b & cs]
(reduce + (+ a b) cs)))
(defn - "complement or asymetric difference"
([x] (reduce subtract any-char (ranges x)))
([x & xs]
(reduce #(reduce subtract %1 (ranges %2)) x xs)))
(defn * "intersection"
([] any-char)
([a] (charset a))
([a b]
(- (+ (- a) (- b))))
([a b & cs]
(- (reduce + (+ (- a) (- b)) (map - cs)))))
(defn not [& xs]
(- (reduce + xs)))
(defn pick
"Returns a character contained in the charset or nil if the
charset is empty."
[cs]
(when-let [[a b] (first (ranges cs))]
(or a \u0000)))
(defn has? [cs c]
(boolean ((:cs (charset cs)) [c c])))
(defn disjunctive-union
"as and bs are collection of disjunct charsets, returns their union as a
collection of smaller disjunct charsets."
([] nil)
([as] as)
([as bs]
(let [A (reduce + as)
B (reduce + bs)]
(filter pick
(concat
(map #(- % B) as)
(map #(- % A) bs)
(for [a as b bs] (* a b)))))))
(defn disjunctive-intersection
"as and bs are collection of disjunct charsets, returns their intersection
as a collection of smaller disjunct charsets."
([] [any-char])
([as] as)
([as bs]
(filter pick
(for [a as b bs] (* a b)))))
|
|
06a1df9dbe8a5a652ab9208cd62caa186b9eecd6de4e8b881f633c99c316d1cc | tezos-checker/checker | burrow.ml | open FixedPoint
open Kit
open Tok
open Parameters
open LiquidationAuctionPrimitiveTypes
open Constants
open Error
open Common
[@@@coverage off]
type burrow =
{ (* Whether the creation deposit for the burrow has been paid. If the
* creation deposit has been paid, the burrow is considered "active" and
* "closed"/inactive otherwise. Paying the creation deposit re-activates
* a "closed" burrow. *)
active : bool;
(* Address of the contract holding the burrow's collateral. *)
address: Ligo.address;
(* Collateral currently stored in the burrow. *)
collateral : tok;
(* Outstanding kit minted out of the burrow. *)
outstanding_kit : kit;
(* The imbalance adjustment index observed the last time the burrow was
* touched. *)
adjustment_index : fixedpoint;
(* Collateral that has been sent off to auctions. For all intents and
* purposes, this collateral can be considered gone, but depending on the
* outcome of the auctions we expect some kit in return. *)
collateral_at_auction : tok;
(* The timestamp checker had the last time the burrow was touched. *)
last_checker_timestamp : Ligo.timestamp;
}
[@@deriving show]
(* Outcome data of a burrow liquidation: what the liquidator is paid, what is
 * queued for auction, and the burrow's resulting state. *)
type liquidation_details =
  { liquidation_reward : tok; (* paid to whoever triggered the liquidation *)
    collateral_to_auction : tok; (* collateral queued for the liquidation auction *)
    burrow_state : burrow; (* the burrow after the liquidation is applied *)
  }
[@@deriving show]

type liquidation_type =
  (* partial: some collateral remains in the burrow *)
  | Partial
  (* complete: deplete the collateral *)
  | Complete
  (* complete: deplete the collateral AND the creation deposit *)
  | Close
[@@deriving show]

(* None when the burrow does not warrant liquidation at all. *)
type liquidation_result = (liquidation_type * liquidation_details) option
[@@deriving show]
[@@@coverage on]
(** Update the outstanding kit, update the adjustment index, and the timestamp. *)
let burrow_touch (p: parameters) (burrow: burrow) : burrow =
  (* Fast path: already touched at the current checker timestamp. *)
  let burrow_out = if p.last_touched = burrow.last_checker_timestamp
    then
      burrow
    else
      let current_adjustment_index = compute_adjustment_index p in
      { burrow with
        (* Scale the outstanding kit by the ratio of the new adjustment index
         * to the index recorded at the last touch, rounding down. *)
        outstanding_kit =
          kit_of_fraction_floor
            (Ligo.mul_nat_int
               (kit_to_denomination_nat burrow.outstanding_kit)
               (fixedpoint_to_raw current_adjustment_index)
            )
            (Ligo.mul_int_int
               kit_scaling_factor_int
               (fixedpoint_to_raw burrow.adjustment_index)
            );
        adjustment_index = current_adjustment_index;
        last_checker_timestamp = p.last_touched;
      }
  in
  (* Touching never changes the burrow's collateral-holding contract. *)
  assert (burrow.address = burrow_out.address);
  burrow_out
(** Address of the contract holding the burrow's collateral. *)
let[@inline] burrow_address (b: burrow) : Ligo.address =
  b.address
(** Computes the total amount of tok associated with a burrow. This includes
  * the collateral, collateral_at_auction, and the creation_deposit if the
  * burrow is active. *)
let burrow_total_associated_tok (b: burrow) : tok =
  tok_add
    (tok_add b.collateral b.collateral_at_auction)
    (if b.active then creation_deposit else tok_zero)
(** Collateral currently sent off to liquidation auctions. *)
let[@inline] burrow_collateral_at_auction (b: burrow) : tok =
  b.collateral_at_auction
(** Under-collateralization condition: tok < f * kit * price. *)
let[@inline] undercollateralization_condition (f: ratio) (price: ratio) (tok: ratio) (kit: ratio) : bool =
  let { num = num_f; den = den_f; } = f in
  let { num = num_p; den = den_p; } = price in
  let { num = num_tz; den = den_tz; } = tok in
  let { num = num_kt; den = den_kt; } = kit in
  (* Cross-multiplied form of num_tz/den_tz < (num_f * num_kt * num_p) /
   * (den_f * den_kt * den_p), valid assuming all denominators are positive;
   * keeps everything in integer arithmetic. *)
  let lhs =
    Ligo.mul_int_int
      (Ligo.mul_int_int num_tz den_f)
      (Ligo.mul_int_int den_kt den_p) in
  let rhs =
    Ligo.mul_int_int
      (Ligo.mul_int_int num_f num_kt)
      (Ligo.mul_int_int den_tz num_p) in
  Ligo.lt_int_int lhs rhs
(** Check whether a burrow is overburrowed. A burrow is overburrowed if
  *
  *   collateral < fminting * kit_outstanding * minting_price
  *
  * The quantity collateral / (fminting * minting_price) we call the burrowing
  * limit (normally kit_outstanding <= burrowing_limit). NOTE: for the
  * purposes of minting/checking overburrowedness, we do not take into
  * account expected kit from pending auctions; for all we know, this could
  * be lost forever.
  *)
let burrow_is_overburrowed (p: parameters) (b: burrow) : bool =
  (* Caller is expected to have touched the burrow first. *)
  assert (p.last_touched = b.last_checker_timestamp);
  (* Express both quantities as exact rationals over their scaling factors. *)
  let tok = { num = tok_to_denomination_int b.collateral; den = tok_scaling_factor_int; } in
  let kit = { num = kit_to_denomination_int b.outstanding_kit; den = kit_scaling_factor_int; } in
  undercollateralization_condition fminting (minting_price p) tok kit
(* max_kit_outstanding = FLOOR (collateral / (fminting * minting_price)) *)
(** Maximum amount of kit that can be outstanding against this burrow's
  * collateral without making it overburrowed, rounded down. *)
let burrow_max_mintable_kit (p: parameters) (b: burrow) : kit =
  assert (p.last_touched = b.last_checker_timestamp);
  let { num = num_fm; den = den_fm; } = fminting in
  let { num = num_mp; den = den_mp; } = minting_price p in
  let numerator =
    Ligo.mul_nat_int
      (tok_to_denomination_nat b.collateral)
      (Ligo.mul_int_int den_fm den_mp) in
  let denominator =
    Ligo.mul_int_int
      tok_scaling_factor_int
      (Ligo.mul_int_int num_fm num_mp) in
  kit_of_fraction_floor numerator denominator
(** Return a liquidation slice's collateral from the auction queue back into
  * the burrow (e.g. when the slice is cancelled). The burrow must be active
  * and must have at least the slice's tok at auction. *)
let burrow_return_slice_from_auction
    (slice: liquidation_slice_contents)
    (burrow: burrow)
  : burrow =
  assert burrow.active;
  assert (geq_tok_tok burrow.collateral_at_auction slice.tok);
  let burrow_out =
    { burrow with
      collateral = tok_add burrow.collateral slice.tok;
      collateral_at_auction = tok_sub burrow.collateral_at_auction slice.tok;
    } in
  assert (burrow.address = burrow_out.address);
  burrow_out
(** Credit the kit won by auctioning off a slice to the burrow. Returns the
  * updated burrow, the kit actually used to repay outstanding kit, and any
  * excess kit beyond what was outstanding. *)
let burrow_return_kit_from_auction
    (slice: liquidation_slice_contents)
    (kit: kit)
    (burrow: burrow) : burrow * kit * kit =
  assert (geq_tok_tok burrow.collateral_at_auction slice.tok);
  (* Repay at most the outstanding kit; the rest is excess. *)
  let returned_kit = kit_min burrow.outstanding_kit kit in
  let excess_kit = kit_sub kit returned_kit in
  let burrow_out =
    { burrow with
      outstanding_kit = kit_sub burrow.outstanding_kit returned_kit;
      collateral_at_auction = tok_sub burrow.collateral_at_auction slice.tok;
    } in
  assert (burrow.address = burrow_out.address);
  (* Conservation: nothing is created or lost. *)
  assert (eq_kit_kit (kit_add returned_kit excess_kit) kit);
  (burrow_out, returned_kit, excess_kit)
(** Create a fresh active burrow at the given address, funded with the given
  * tok. Fails if the tok does not cover the creation deposit; the remainder
  * becomes the initial collateral. *)
let burrow_create (p: parameters) (addr: Ligo.address) (tok: tok) : burrow =
  if lt_tok_tok tok creation_deposit
  then (Ligo.failwith error_InsufficientFunds : burrow)
  else
    { active = true;
      address = addr;
      collateral = tok_sub tok creation_deposit;
      outstanding_kit = kit_zero;
      adjustment_index = compute_adjustment_index p;
      collateral_at_auction = tok_zero;
      last_checker_timestamp = p.last_touched; (* NOTE: If checker is up-to-date, the timestamp should be _now_. *)
    }
(** Add non-negative collateral to a burrow. *)
(* TOKFIX: we need a more generic name (e.g., deposit_collateral) *)
let[@inline] burrow_deposit_collateral (p: parameters) (t: tok) (b: burrow) : burrow =
  let b = burrow_touch p b in
  let burrow_out = { b with collateral = tok_add b.collateral t } in
  assert (b.address = burrow_out.address);
  burrow_out
(** Withdraw a non-negative amount of collateral from the burrow, as long as
  * this will not overburrow it. *)
(* TOKFIX: we need a more generic name (e.g., withdraw_collateral) *)
let burrow_withdraw_collateral (p: parameters) (t: tok) (b: burrow) : burrow =
  let b = burrow_touch p b in
  let burrow = { b with collateral = tok_sub b.collateral t } in
  let burrow_out = if burrow_is_overburrowed p burrow
    then (Ligo.failwith error_WithdrawTezFailure : burrow)
    else burrow
  in
  assert (b.address = burrow_out.address);
  burrow_out
(** Mint a non-negative amount of kits from the burrow, as long as this will
  * not overburrow it *)
let burrow_mint_kit (p: parameters) (kit: kit) (b: burrow) : burrow =
  let b = burrow_touch p b in
  let burrow_out =
    (* Tentatively add the kit, then check the overburrowedness invariant. *)
    let burrow = { b with outstanding_kit = kit_add b.outstanding_kit kit } in
    if burrow_is_overburrowed p burrow
    then (Ligo.failwith error_MintKitFailure : burrow)
    else burrow
  in
  assert (b.address = burrow_out.address);
  burrow_out
(** Deposit/burn a non-negative amount of kit to the burrow. Return the amount
  * of kit burned. *)
let[@inline] burrow_burn_kit (p: parameters) (kit: kit) (b: burrow) : burrow * kit =
  let b = burrow_touch p b in
  (* Never burn more than is outstanding. *)
  let actual_burned = kit_min b.outstanding_kit kit in
  let burrow_out = {b with outstanding_kit = kit_sub b.outstanding_kit actual_burned} in
  assert (b.address = burrow_out.address);
  (burrow_out, actual_burned)
(** Activate a currently inactive burrow. This operation will fail if either
  * the burrow is already active, or if the amount of tez given is less than
  * the creation deposit. *)
let burrow_activate (p: parameters) (tok: tok) (b: burrow) : burrow =
  let b = burrow_touch p b in
  let burrow_out =
    if lt_tok_tok tok creation_deposit then
      (Ligo.failwith error_InsufficientFunds : burrow)
    else if b.active then
      (Ligo.failwith error_BurrowIsAlreadyActive : burrow)
    else
      (* Re-activation: stash the deposit, rest becomes collateral. *)
      { b with
        active = true;
        collateral = tok_sub tok creation_deposit;
      }
  in
  assert (b.address = burrow_out.address);
  burrow_out
(** Deactivate a currently active burrow. This operation will fail if the burrow
  * (a) is already inactive, or (b) is overburrowed, or (c) has kit
  * outstanding, or (d) has collateral sent off to auctions. *)
let burrow_deactivate (p: parameters) (b: burrow) : (burrow * tok) =
  let b = burrow_touch p b in
  let burrow_out, return =
    if burrow_is_overburrowed p b then
      (Ligo.failwith error_DeactivatingAnOverburrowedBurrow : (burrow * tok))
    else if (not b.active) then
      (Ligo.failwith error_DeactivatingAnInactiveBurrow : (burrow * tok))
    else if gt_kit_kit b.outstanding_kit kit_zero then
      (Ligo.failwith error_DeactivatingWithOutstandingKit : (burrow * tok))
    else if gt_tok_tok b.collateral_at_auction tok_zero then
      (Ligo.failwith error_DeactivatingWithCollateralAtAuctions : (burrow * tok))
    else
      (* Return all collateral plus the creation deposit to the owner. *)
      let return = tok_add b.collateral creation_deposit in
      let updated_burrow =
        { b with
          active = false;
          collateral = tok_zero;
        } in
      (updated_burrow, return)
  in
  assert (b.address = burrow_out.address);
  burrow_out, return
(* ************************************************************************* *)
(** LIQUIDATION-RELATED *)
(* ************************************************************************* *)
(** Compute the number of tez that needs to be auctioned off so that the burrow
  * can return to a state when it is no longer overburrowed or having a risk of
  * liquidation (assuming the current expected minting price). For its
  * calculation, see docs/burrow-state-liquidations.md. Note that it's skewed
  * on the safe side (overapproximation). This ensures that after a partial
  * liquidation we are no longer "optimistically overburrowed".
  * Returns the number of tez in mutez *)
let compute_collateral_to_auction (p: parameters) (b: burrow) : Ligo.int =
  let { num = num_fm; den = den_fm; } = fminting in
  let { num = num_mp; den = den_mp; } = minting_price p in
  (* Note that num_lp and den_lp here are actually = 1 - liquidation_penalty *)
  let { num = num_lp; den = den_lp; } =
    let { num = num_lp; den = den_lp; } = liquidation_penalty in
    { num = Ligo.sub_int_int den_lp num_lp; den = den_lp; }
  in
  (* numerator = tez_sf * den_lp * num_fm * num_mp * outstanding_kit
     - kit_sf * den_mp * (num_lp * num_fm * collateral_at_auctions + den_lp * den_fm * collateral) *)
  let numerator =
    Ligo.sub_int_int
      (Ligo.mul_int_int
         tok_scaling_factor_int
         (Ligo.mul_int_int
            den_lp
            (Ligo.mul_int_int
               num_fm
               (Ligo.mul_int_nat
                  num_mp
                  (kit_to_denomination_nat b.outstanding_kit)
               )
            )
         )
      )
      (Ligo.mul_int_int
         (Ligo.mul_int_int kit_scaling_factor_int den_mp)
         (Ligo.add_int_int
            (Ligo.mul_int_int num_lp (Ligo.mul_int_nat num_fm (tok_to_denomination_nat b.collateral_at_auction)))
            (Ligo.mul_int_int den_lp (Ligo.mul_int_nat den_fm (tok_to_denomination_nat b.collateral)))
         )
      ) in
  (* denominator = (kit_sf * den_mp * tez_sf) * (num_lp * num_fm - den_lp * den_fm) *)
  let denominator =
    Ligo.mul_int_int
      kit_scaling_factor_int
      (Ligo.mul_int_int
         den_mp
         (Ligo.mul_int_int
            tok_scaling_factor_int
            (Ligo.sub_int_int
               (Ligo.mul_int_int num_lp num_fm)
               (Ligo.mul_int_int den_lp den_fm)
            )
         )
      ) in
  (* Ceiling division: err on the side of auctioning slightly more. *)
  cdiv_int_int (Ligo.mul_int_int numerator tok_scaling_factor_int) denominator
(** Compute the amount of kit we expect to receive from auctioning off an
  * amount of tez, using the current minting price. Since this is an artifice,
  * a mere expectation, we neither floor nor ceil, but instead return the
  * lossless fraction as is. *)
let compute_expected_kit (p: parameters) (collateral_to_auction: tok) : ratio =
  let { num = num_lp; den = den_lp; } = liquidation_penalty in
  let { num = num_mp; den = den_mp; } = minting_price p in
  (* expected = collateral_to_auction * (1 - liquidation_penalty) / minting_price *)
  let numerator =
    Ligo.mul_nat_int
      (tok_to_denomination_nat collateral_to_auction)
      (Ligo.mul_int_int
         (Ligo.sub_int_int den_lp num_lp)
         den_mp
      ) in
  let denominator =
    Ligo.mul_int_int
      tok_scaling_factor_int
      (Ligo.mul_int_int den_lp num_mp) in
  { num = numerator; den = denominator; }
(** Check whether a burrow can be marked for liquidation. A burrow can be
  * marked for liquidation if:
  *
  *   tez_collateral < fliquidation * (kit_outstanding - expected_kit_from_auctions) * liquidation_price
  *
  * The quantity tez_collateral / (fliquidation * liquidation_price) we call the
  * liquidation limit. Note that for this check we optimistically take into
  * account the expected kit from pending auctions (using the current minting
  * price) when computing the outstanding kit. Note that only active burrows
  * can be liquidated; inactive ones are dormant, until either all pending
  * auctions finish or if their creation deposit is restored. *)
let burrow_is_liquidatable (p: parameters) (b: burrow) : bool =
  assert (p.last_touched = b.last_checker_timestamp);
  let tez = { num = tok_to_denomination_int b.collateral; den = tok_scaling_factor_int; } in
  let kit = (* kit = kit_outstanding - expected_kit_from_auctions *)
    let { num = num_ek; den = den_ek; } = compute_expected_kit p b.collateral_at_auction in
    (* Rational subtraction over a common denominator kit_sf * den_ek. *)
    { num =
        Ligo.sub_int_int
          (Ligo.mul_nat_int (kit_to_denomination_nat b.outstanding_kit) den_ek)
          (Ligo.mul_int_int kit_scaling_factor_int num_ek);
      den = Ligo.mul_int_int kit_scaling_factor_int den_ek;
    } in
  b.active && undercollateralization_condition fliquidation (liquidation_price p) tez kit
(** Check whether the return of a slice to its burrow (cancellation) is
  * warranted. For the cancellation to be warranted, it must be the case that
  * after returning the slice to the burrow, the burrow is optimistically
  * non-overburrowed (i.e., if all remaining collateral at auction sells at the
  * current price but with penalties paid, the burrow becomes underburrowed):
  *
  *   collateral + slice >= fminting * (outstanding - compute_expected_kit (collateral_at_auction - slice)) * minting_price
  *
  * Note that only active burrows can be liquidated; inactive ones are dormant,
  * until either all pending auctions finish or if their creation deposit is
  * restored. *)
let burrow_is_cancellation_warranted (p: parameters) (b: burrow) (slice_tok: tok) : bool =
  assert (p.last_touched = b.last_checker_timestamp);
  assert (geq_tok_tok b.collateral_at_auction slice_tok);
  let tez = (* tez = collateral + slice *)
    { num = tok_to_denomination_int (tok_add b.collateral slice_tok);
      den = tok_scaling_factor_int;
    } in
  let kit = (* kit = outstanding - compute_expected_kit (collateral_at_auction - slice) *)
    let { num = num_ek; den = den_ek; } =
      compute_expected_kit p (tok_sub b.collateral_at_auction slice_tok) in
    (* Rational subtraction over a common denominator kit_sf * den_ek. *)
    { num =
        Ligo.sub_int_int
          (Ligo.mul_nat_int (kit_to_denomination_nat b.outstanding_kit) den_ek)
          (Ligo.mul_int_int kit_scaling_factor_int num_ek);
      den = Ligo.mul_int_int kit_scaling_factor_int den_ek;
    } in
  b.active && not (undercollateralization_condition fminting (minting_price p) tez kit)
(** Compute the minimum amount of kit to receive for considering the
  * liquidation unwarranted, calculated as (see
  * docs/burrow-state-liquidations.md for the derivation of this formula):
  *
  *   collateral_to_auction * (fliquidation * (outstanding_kit - expected_kit_from_auctions)) / collateral
  *
  * If the burrow has no collateral left in it (e.g., right after a successful
  * Complete-liquidation) then we have two cases:
  * (a) If the outstanding kit is non-zero then there is no way for this
  *     liquidation to be considered unwarranted. outstanding_kit is infinitely
  *     many times greater than the collateral.
  * (b) If the outstanding kit is also zero then the liquidation in question
  *     shouldn't have happened (so it is by definition unwarranted). I think
  *     that this is impossible in practice, but it's probably best to account
  *     for it so that the function is not partial.
  *)
let[@inline] compute_min_kit_for_unwarranted (p: parameters) (b: burrow) (collateral_to_auction: tok) : kit option =
  assert (p.last_touched = b.last_checker_timestamp);
  if b.collateral = tok_zero (* NOTE: division by zero. *)
  then
    if not (eq_kit_kit b.outstanding_kit (kit_of_denomination (Ligo.nat_from_literal "0n")))
    then (None: kit option) (* (a): infinity, basically *)
    else (Some kit_zero) (* (b): zero *)
  else
    let { num = num_fl; den = den_fl; } = fliquidation in
    let { num = num_ek; den = den_ek; } = compute_expected_kit p b.collateral_at_auction in
    (* numerator = max 0 (collateral_to_auction * num_fl * (den_ek * outstanding_kit - kit_sf * num_ek)) *)
    let numerator =
      let numerator =
        Ligo.mul_int_int
          (Ligo.mul_nat_int (tok_to_denomination_nat collateral_to_auction) num_fl)
          (Ligo.sub_int_int
             (Ligo.mul_int_nat den_ek (kit_to_denomination_nat b.outstanding_kit))
             (Ligo.mul_int_int kit_scaling_factor_int num_ek)
          ) in
      max_int (Ligo.int_from_literal "0") numerator in
    (* denominator = collateral * den_fl * kit_sf * den_ek *)
    let denominator =
      Ligo.mul_int_int
        (Ligo.mul_nat_int (tok_to_denomination_nat b.collateral) den_fl)
        (Ligo.mul_int_int kit_scaling_factor_int den_ek) in
    Some (kit_of_fraction_ceil numerator denominator) (* Round up here; safer for the system, less so for the burrow *)
(** Decide whether and how to liquidate a burrow: not at all (None), partially,
  * completely, or completely including the creation deposit (Close). *)
let burrow_request_liquidation (p: parameters) (b: burrow) : liquidation_result =
  let b = burrow_touch p b in
  (* Reward for the actor triggering the liquidation: a percentage of the
   * burrow's collateral, rounded down. *)
  let partial_reward =
    let { num = num_lrp; den = den_lrp; } = liquidation_reward_percentage in
    tok_of_fraction_floor
      (Ligo.mul_nat_int (tok_to_denomination_nat b.collateral) num_lrp)
      (Ligo.mul_int_int tok_scaling_factor_int den_lrp)
  in
  if not (burrow_is_liquidatable p b) then
    (* Case 1: The outstanding kit does not exceed the liquidation limit, or
     * the burrow is already without its creation deposit, inactive; we
     * shouldn't liquidate the burrow. *)
    (None : liquidation_result)
  else
    let liquidation_reward = tok_add creation_deposit partial_reward in
    if lt_tok_tok (tok_sub b.collateral partial_reward) creation_deposit then
      (* Case 2a: Cannot even refill the creation deposit; liquidate the whole
       * thing (after paying the liquidation reward of course). *)
      let collateral_to_auction = tok_sub b.collateral partial_reward in
      let final_burrow =
        { b with
          active = false;
          collateral = tok_zero;
          collateral_at_auction = tok_add b.collateral_at_auction collateral_to_auction;
        } in
      Some
        ( Close,
          { liquidation_reward = liquidation_reward;
            collateral_to_auction = collateral_to_auction;
            burrow_state = final_burrow; }
        )
    else
      (* Case 2b: We can replenish the creation deposit. Now we gotta see if it's
       * possible to liquidate the burrow partially or if we have to do so
       * completely (deplete the collateral). *)
      let b_without_reward = { b with collateral = tok_sub (tok_sub b.collateral partial_reward) creation_deposit } in
      let collateral_to_auction = compute_collateral_to_auction p b_without_reward in
      (* FIXME: The property checked by the following assertion is quite
       * intricate to prove. We probably should include the proof somewhere
       * in the codebase. *)
      assert (Ligo.gt_int_int collateral_to_auction (Ligo.int_from_literal "0"));
      if Ligo.gt_int_int collateral_to_auction (tok_to_denomination_int b_without_reward.collateral) then
        (* Case 2b.1: With the current price it's impossible to make the burrow
         * not undercollateralized; pay the liquidation reward, stash away the
         * creation deposit, and liquidate all the remaining collateral, even if
         * it is not expected to repay enough kit. *)
        let collateral_to_auction = b_without_reward.collateral in (* OVERRIDE *)
        let final_burrow =
          { b with
            collateral = tok_zero;
            collateral_at_auction = tok_add b.collateral_at_auction collateral_to_auction;
          } in
        Some
          ( Complete,
            { liquidation_reward = liquidation_reward;
              collateral_to_auction = collateral_to_auction;
              burrow_state = final_burrow; }
          )
      else
        (* Case 2b.2: Recovery is possible; pay the liquidation reward, stash away the
         * creation deposit, and liquidate the collateral needed to underburrow
         * the burrow (assuming that the past auctions will be successful but
         * warranted, and that the liquidation we are performing will also be
         * deemed warranted). If---when the auction is over---we realize that the
         * liquidation was not really warranted, we shall return the auction
         * earnings in their entirety. If not, then only 90% of the earnings
         * shall be returned. *)
        let collateral_to_auction = match Ligo.is_nat collateral_to_auction with
          | Some collateral -> tok_of_denomination collateral
          (* Note: disabling coverage for this line since it really should be impossible to reach this line *)
          | None -> (Ligo.failwith internalError_ComputeTezToAuctionNegativeResult : tok)
                    [@coverage off]
        in
        let final_burrow =
          { b with
            collateral = tok_sub b_without_reward.collateral collateral_to_auction;
            collateral_at_auction = tok_add b.collateral_at_auction collateral_to_auction;
          } in
        Some
          ( Partial,
            { liquidation_reward = liquidation_reward;
              collateral_to_auction = collateral_to_auction;
              burrow_state = final_burrow; }
          )
(* BEGIN_OCAML *)
[@@@coverage off]
(* Test-only accessor: the burrow's current collateral. *)
let burrow_collateral (b: burrow) : tok =
  b.collateral

(* Test-only accessor: whether the burrow is active. *)
let burrow_active (b: burrow) : bool =
  b.active
(* Test-only constructor: build a burrow record directly from its fields,
 * bypassing the creation-deposit checks of burrow_create. *)
let make_burrow_for_test
    ~active
    ~address
    ~collateral
    ~outstanding_kit
    ~adjustment_index
    ~collateral_at_auction
    ~last_checker_timestamp =
  {
    address = address;
    active = active;
    collateral = collateral;
    outstanding_kit = outstanding_kit;
    adjustment_index = adjustment_index;
    collateral_at_auction = collateral_at_auction;
    last_checker_timestamp = last_checker_timestamp;
  }
(** NOTE: For testing only. Check whether a burrow is overburrowed, assuming
  * that all collateral that is in auctions at the moment will be sold at the
  * current minting price, and that all these liquidations were warranted
  * (i.e. liquidation penalties have been paid).
  *
  *   collateral < fminting * (kit_outstanding - expected_kit_from_auctions) * minting_price
  *)
let burrow_is_optimistically_overburrowed (p: parameters) (b: burrow) : bool =
  assert (p.last_touched = b.last_checker_timestamp); (* Alternatively: touch the burrow here *)
  let { num = num_fm; den = den_fm; } = fminting in
  let { num = num_mp; den = den_mp; } = minting_price p in
  let { num = num_ek; den = den_ek; } = compute_expected_kit p b.collateral_at_auction in
  (* lhs = collateral * den_fm * kit_sf * den_ek * den_mp *)
  let lhs =
    Ligo.mul_nat_int
      (tok_to_denomination_nat b.collateral)
      (Ligo.mul_int_int
         (Ligo.mul_int_int den_fm kit_scaling_factor_int)
         (Ligo.mul_int_int den_ek den_mp)
      ) in
  (* rhs = num_fm * (kit_outstanding * den_ek - kit_sf * num_ek) * num_mp * tez_sf *)
  let rhs =
    Ligo.mul_int_int
      num_fm
      (Ligo.mul_int_int
         (Ligo.sub_int_int
            (Ligo.mul_nat_int (kit_to_denomination_nat b.outstanding_kit) den_ek)
            (Ligo.mul_int_int kit_scaling_factor_int num_ek)
         )
         (Ligo.mul_int_int num_mp tok_scaling_factor_int)
      ) in
  Ligo.lt_int_int lhs rhs
let burrow_outstanding_kit (b: burrow) : kit = b.outstanding_kit
[@@@coverage on]
(* END_OCAML *)
| null | https://raw.githubusercontent.com/tezos-checker/checker/f8604c1493c2ecf94b17ebc35346d59f5028ecaa/src/burrow.ml | ocaml | Whether the creation deposit for the burrow has been paid. If the
* creation deposit has been paid, the burrow is considered "active" and
* "closed"/inactive otherwise. Paying the creation deposit re-activates
* a "closed" burrow.
Address of the contract holding the burrow's collateral.
Collateral currently stored in the burrow.
Outstanding kit minted out of the burrow.
The imbalance adjustment index observed the last time the burrow was
* touched.
Collateral that has been sent off to auctions. For all intents and
* purposes, this collateral can be considered gone, but depending on the
* outcome of the auctions we expect some kit in return.
The timestamp checker had the last time the burrow was touched.
partial: some collateral remains in the burrow
complete: deplete the collateral
complete: deplete the collateral AND the creation deposit
* Update the outstanding kit, update the adjustment index, and the timestamp.
* Under-collateralization condition: tok < f * kit * price.
* Check whether a burrow is overburrowed. A burrow is overburrowed if
*
* collateral < fminting * kit_outstanding * minting_price
*
* The quantity collateral / (fminting * minting_price) we call the burrowing
* limit (normally kit_outstanding <= burrowing_limit). NOTE: for the
* purposes of minting/checking overburrowedness, we do not take into
* account expected kit from pending auctions; for all we know, this could
* be lost forever.
max_kit_outstanding = FLOOR (collateral / (fminting * minting_price))
NOTE: If checker is up-to-date, the timestamp should be _now_.
* Add non-negative collateral to a burrow.
* Withdraw a non-negative amount of collateral from the burrow, as long as
* this will not overburrow it.
* Mint a non-negative amount of kits from the burrow, as long as this will
* not overburrow it
* Deposit/burn a non-negative amount of kit to the burrow. Return the amount
* of kit burned.
* Activate a currently inactive burrow. This operation will fail if either
* the burrow is already active, or if the amount of tez given is less than
* the creation deposit.
*************************************************************************
* LIQUIDATION-RELATED
*************************************************************************
* Compute the number of tez that needs to be auctioned off so that the burrow
* can return to a state when it is no longer overburrowed or having a risk of
* liquidation (assuming the current expected minting price). For its
* calculation, see docs/burrow-state-liquidations.md. Note that it's skewed
* on the safe side (overapproximation). This ensures that after a partial
* liquidation we are no longer "optimistically overburrowed".
* Returns the number of tez in mutez
* Compute the amount of kit we expect to receive from auctioning off an
* amount of tez, using the current minting price. Since this is an artifice,
* a mere expectation, we neither floor nor ceil, but instead return the
* lossless fraction as is.
* Check whether a burrow can be marked for liquidation. A burrow can be
* marked for liquidation if:
*
* tez_collateral < fliquidation * (kit_outstanding - expected_kit_from_auctions) * liquidation_price
*
* The quantity tez_collateral / (fliquidation * liquidation_price) we call the
* liquidation limit. Note that for this check we optimistically take into
* account the expected kit from pending auctions (using the current minting
* price) when computing the outstanding kit. Note that only active burrows
* can be liquidated; inactive ones are dormant, until either all pending
* auctions finish or if their creation deposit is restored.
kit = kit_outstanding - expected_kit_from_auctions
* Check whether the return of a slice to its burrow (cancellation) is
* warranted. For the cancellation to be warranted, it must be the case that
* after returning the slice to the burrow, the burrow is optimistically
* non-overburrowed (i.e., if all remaining collateral at auction sells at the
* current price but with penalties paid, the burrow becomes underburrowed):
*
* collateral + slice >= fminting * (outstanding - compute_expected_kit (collateral_at_auction - slice)) * minting_price
*
* Note that only active burrows can be liquidated; inactive ones are dormant,
* until either all pending auctions finish or if their creation deposit is
* restored.
tez = collateral + slice
kit = outstanding - compute_expected_kit (collateral_at_auction - slice)
(a): infinity, basically
denominator = collateral * den_fl * kit_sf * den_ek
Round up here; safer for the system, less so for the burrow
Case 1: The outstanding kit does not exceed the liquidation limit, or
* the burrow is already without its creation deposit, inactive; we
* shouldn't liquidate the burrow.
Case 2a: Cannot even refill the creation deposit; liquidate the whole
* thing (after paying the liquidation reward of course).
FIXME: The property checked by the following assertion is quite
* intricate to prove. We probably should include the proof somewhere
* in the codebase.
OVERRIDE
Note: disabling coverage for this line since it really should be impossible to reach this line
BEGIN_OCAML
* NOTE: For testing only. Check whether a burrow is overburrowed, assuming
* that all collateral that is in auctions at the moment will be sold at the
* current minting price, and that all these liquidations were warranted
* (i.e. liquidation penalties have been paid).
*
* collateral < fminting * (kit_outstanding - expected_kit_from_auctions) * minting_price
Alternatively: touch the burrow here
END_OCAML | open FixedPoint
open Kit
open Tok
open Parameters
open LiquidationAuctionPrimitiveTypes
open Constants
open Error
open Common
[@@@coverage off]
type burrow =
active : bool;
address: Ligo.address;
collateral : tok;
outstanding_kit : kit;
adjustment_index : fixedpoint;
collateral_at_auction : tok;
last_checker_timestamp : Ligo.timestamp;
}
[@@deriving show]
type liquidation_details =
{ liquidation_reward : tok;
collateral_to_auction : tok;
burrow_state : burrow;
}
[@@deriving show]
type liquidation_type =
| Partial
| Complete
| Close
[@@deriving show]
type liquidation_result = (liquidation_type * liquidation_details) option
[@@deriving show]
[@@@coverage on]
let burrow_touch (p: parameters) (burrow: burrow) : burrow =
let burrow_out = if p.last_touched = burrow.last_checker_timestamp
then
burrow
else
let current_adjustment_index = compute_adjustment_index p in
{ burrow with
outstanding_kit =
kit_of_fraction_floor
(Ligo.mul_nat_int
(kit_to_denomination_nat burrow.outstanding_kit)
(fixedpoint_to_raw current_adjustment_index)
)
(Ligo.mul_int_int
kit_scaling_factor_int
(fixedpoint_to_raw burrow.adjustment_index)
);
adjustment_index = current_adjustment_index;
last_checker_timestamp = p.last_touched;
}
in
assert (burrow.address = burrow_out.address);
burrow_out
let[@inline] burrow_address (b: burrow) : Ligo.address =
b.address
* Computes the total amount of tok associated with a burrow . This includes
* the collateral , collateral_at_auction , and the creation_deposit if the
* burrow is active .
* the collateral, collateral_at_auction, and the creation_deposit if the
* burrow is active. *)
let burrow_total_associated_tok (b: burrow) : tok =
tok_add
(tok_add b.collateral b.collateral_at_auction)
(if b.active then creation_deposit else tok_zero)
let[@inline] burrow_collateral_at_auction (b: burrow) : tok =
b.collateral_at_auction
let[@inline] undercollateralization_condition (f: ratio) (price: ratio) (tok: ratio) (kit: ratio) : bool =
let { num = num_f; den = den_f; } = f in
let { num = num_p; den = den_p; } = price in
let { num = num_tz; den = den_tz; } = tok in
let { num = num_kt; den = den_kt; } = kit in
let lhs =
Ligo.mul_int_int
(Ligo.mul_int_int num_tz den_f)
(Ligo.mul_int_int den_kt den_p) in
let rhs =
Ligo.mul_int_int
(Ligo.mul_int_int num_f num_kt)
(Ligo.mul_int_int den_tz num_p) in
Ligo.lt_int_int lhs rhs
let burrow_is_overburrowed (p: parameters) (b: burrow) : bool =
assert (p.last_touched = b.last_checker_timestamp);
let tok = { num = tok_to_denomination_int b.collateral; den = tok_scaling_factor_int; } in
let kit = { num = kit_to_denomination_int b.outstanding_kit; den = kit_scaling_factor_int; } in
undercollateralization_condition fminting (minting_price p) tok kit
let burrow_max_mintable_kit (p: parameters) (b: burrow) : kit =
assert (p.last_touched = b.last_checker_timestamp);
let { num = num_fm; den = den_fm; } = fminting in
let { num = num_mp; den = den_mp; } = minting_price p in
let numerator =
Ligo.mul_nat_int
(tok_to_denomination_nat b.collateral)
(Ligo.mul_int_int den_fm den_mp) in
let denominator =
Ligo.mul_int_int
tok_scaling_factor_int
(Ligo.mul_int_int num_fm num_mp) in
kit_of_fraction_floor numerator denominator
let burrow_return_slice_from_auction
(slice: liquidation_slice_contents)
(burrow: burrow)
: burrow =
assert burrow.active;
assert (geq_tok_tok burrow.collateral_at_auction slice.tok);
let burrow_out =
{ burrow with
collateral = tok_add burrow.collateral slice.tok;
collateral_at_auction = tok_sub burrow.collateral_at_auction slice.tok;
} in
assert (burrow.address = burrow_out.address);
burrow_out
let burrow_return_kit_from_auction
(slice: liquidation_slice_contents)
(kit: kit)
(burrow: burrow) : burrow * kit * kit =
assert (geq_tok_tok burrow.collateral_at_auction slice.tok);
let returned_kit = kit_min burrow.outstanding_kit kit in
let excess_kit = kit_sub kit returned_kit in
let burrow_out =
{ burrow with
outstanding_kit = kit_sub burrow.outstanding_kit returned_kit;
collateral_at_auction = tok_sub burrow.collateral_at_auction slice.tok;
} in
assert (burrow.address = burrow_out.address);
assert (eq_kit_kit (kit_add returned_kit excess_kit) kit);
(burrow_out, returned_kit, excess_kit)
let burrow_create (p: parameters) (addr: Ligo.address) (tok: tok) : burrow =
if lt_tok_tok tok creation_deposit
then (Ligo.failwith error_InsufficientFunds : burrow)
else
{ active = true;
address = addr;
collateral = tok_sub tok creation_deposit;
outstanding_kit = kit_zero;
adjustment_index = compute_adjustment_index p;
collateral_at_auction = tok_zero;
}
TOKFIX : we need a more generic name ( e.g. , deposit_collateral )
let[@inline] burrow_deposit_collateral (p: parameters) (t: tok) (b: burrow) : burrow =
let b = burrow_touch p b in
let burrow_out = { b with collateral = tok_add b.collateral t } in
assert (b.address = burrow_out.address);
burrow_out
TOKFIX : we need a more generic name ( e.g. , withdraw_collateral )
let burrow_withdraw_collateral (p: parameters) (t: tok) (b: burrow) : burrow =
let b = burrow_touch p b in
let burrow = { b with collateral = tok_sub b.collateral t } in
let burrow_out = if burrow_is_overburrowed p burrow
then (Ligo.failwith error_WithdrawTezFailure : burrow)
else burrow
in
assert (b.address = burrow_out.address);
burrow_out
let burrow_mint_kit (p: parameters) (kit: kit) (b: burrow) : burrow =
let b = burrow_touch p b in
let burrow_out =
let burrow = { b with outstanding_kit = kit_add b.outstanding_kit kit } in
if burrow_is_overburrowed p burrow
then (Ligo.failwith error_MintKitFailure : burrow)
else burrow
in
assert (b.address = burrow_out.address);
burrow_out
let[@inline] burrow_burn_kit (p: parameters) (kit: kit) (b: burrow) : burrow * kit =
let b = burrow_touch p b in
let actual_burned = kit_min b.outstanding_kit kit in
let burrow_out = {b with outstanding_kit = kit_sub b.outstanding_kit actual_burned} in
assert (b.address = burrow_out.address);
(burrow_out, actual_burned)
let burrow_activate (p: parameters) (tok: tok) (b: burrow) : burrow =
let b = burrow_touch p b in
let burrow_out =
if lt_tok_tok tok creation_deposit then
(Ligo.failwith error_InsufficientFunds : burrow)
else if b.active then
(Ligo.failwith error_BurrowIsAlreadyActive : burrow)
else
{ b with
active = true;
collateral = tok_sub tok creation_deposit;
}
in
assert (b.address = burrow_out.address);
burrow_out
* Deativate a currently active burrow . This operation will fail if the burrow
* ( a ) is already inactive , or ( b ) is overburrowed , or ( c ) has kit
* outstanding , or ( d ) has collateral sent off to auctions .
* (a) is already inactive, or (b) is overburrowed, or (c) has kit
* outstanding, or (d) has collateral sent off to auctions. *)
let burrow_deactivate (p: parameters) (b: burrow) : (burrow * tok) =
let b = burrow_touch p b in
let burrow_out, return =
if burrow_is_overburrowed p b then
(Ligo.failwith error_DeactivatingAnOverburrowedBurrow : (burrow * tok))
else if (not b.active) then
(Ligo.failwith error_DeactivatingAnInactiveBurrow : (burrow * tok))
else if gt_kit_kit b.outstanding_kit kit_zero then
(Ligo.failwith error_DeactivatingWithOutstandingKit : (burrow * tok))
else if gt_tok_tok b.collateral_at_auction tok_zero then
(Ligo.failwith error_DeactivatingWithCollateralAtAuctions : (burrow * tok))
else
let return = tok_add b.collateral creation_deposit in
let updated_burrow =
{ b with
active = false;
collateral = tok_zero;
} in
(updated_burrow, return)
in
assert (b.address = burrow_out.address);
burrow_out, return
let compute_collateral_to_auction (p: parameters) (b: burrow) : Ligo.int =
let { num = num_fm; den = den_fm; } = fminting in
let { num = num_mp; den = den_mp; } = minting_price p in
Note that num_lp and den_lp here are actually = 1 - liquidation_penalty
let { num = num_lp; den = den_lp; } =
let { num = num_lp; den = den_lp; } = liquidation_penalty in
{ num = Ligo.sub_int_int den_lp num_lp; den = den_lp; }
in
numerator = tez_sf * den_lp * num_fm * num_mp * outstanding_kit
- kit_sf * den_mp * ( num_lp * num_fm * * den_fm * collateral )
- kit_sf * den_mp * (num_lp * num_fm * collateral_at_auctions + den_lp * den_fm * collateral) *)
let numerator =
Ligo.sub_int_int
(Ligo.mul_int_int
tok_scaling_factor_int
(Ligo.mul_int_int
den_lp
(Ligo.mul_int_int
num_fm
(Ligo.mul_int_nat
num_mp
(kit_to_denomination_nat b.outstanding_kit)
)
)
)
)
(Ligo.mul_int_int
(Ligo.mul_int_int kit_scaling_factor_int den_mp)
(Ligo.add_int_int
(Ligo.mul_int_int num_lp (Ligo.mul_int_nat num_fm (tok_to_denomination_nat b.collateral_at_auction)))
(Ligo.mul_int_int den_lp (Ligo.mul_int_nat den_fm (tok_to_denomination_nat b.collateral)))
)
) in
denominator = ( kit_sf * den_mp * tez_sf ) * ( num_lp * num_fm - den_lp * den_fm )
let denominator =
Ligo.mul_int_int
kit_scaling_factor_int
(Ligo.mul_int_int
den_mp
(Ligo.mul_int_int
tok_scaling_factor_int
(Ligo.sub_int_int
(Ligo.mul_int_int num_lp num_fm)
(Ligo.mul_int_int den_lp den_fm)
)
)
) in
cdiv_int_int (Ligo.mul_int_int numerator tok_scaling_factor_int) denominator
let compute_expected_kit (p: parameters) (collateral_to_auction: tok) : ratio =
let { num = num_lp; den = den_lp; } = liquidation_penalty in
let { num = num_mp; den = den_mp; } = minting_price p in
let numerator =
Ligo.mul_nat_int
(tok_to_denomination_nat collateral_to_auction)
(Ligo.mul_int_int
(Ligo.sub_int_int den_lp num_lp)
den_mp
) in
let denominator =
Ligo.mul_int_int
tok_scaling_factor_int
(Ligo.mul_int_int den_lp num_mp) in
{ num = numerator; den = denominator; }
let burrow_is_liquidatable (p: parameters) (b: burrow) : bool =
assert (p.last_touched = b.last_checker_timestamp);
let tez = { num = tok_to_denomination_int b.collateral; den = tok_scaling_factor_int; } in
let { num = num_ek; den = den_ek; } = compute_expected_kit p b.collateral_at_auction in
{ num =
Ligo.sub_int_int
(Ligo.mul_nat_int (kit_to_denomination_nat b.outstanding_kit) den_ek)
(Ligo.mul_int_int kit_scaling_factor_int num_ek);
den = Ligo.mul_int_int kit_scaling_factor_int den_ek;
} in
b.active && undercollateralization_condition fliquidation (liquidation_price p) tez kit
let burrow_is_cancellation_warranted (p: parameters) (b: burrow) (slice_tok: tok) : bool =
assert (p.last_touched = b.last_checker_timestamp);
assert (geq_tok_tok b.collateral_at_auction slice_tok);
{ num = tok_to_denomination_int (tok_add b.collateral slice_tok);
den = tok_scaling_factor_int;
} in
let { num = num_ek; den = den_ek; } =
compute_expected_kit p (tok_sub b.collateral_at_auction slice_tok) in
{ num =
Ligo.sub_int_int
(Ligo.mul_nat_int (kit_to_denomination_nat b.outstanding_kit) den_ek)
(Ligo.mul_int_int kit_scaling_factor_int num_ek);
den = Ligo.mul_int_int kit_scaling_factor_int den_ek;
} in
b.active && not (undercollateralization_condition fminting (minting_price p) tez kit)
* Compute the minumum amount of kit to receive for considering the
* liquidation unwarranted , calculated as ( see
* docs / burrow - state - liquidations.md for the derivation of this formula ):
*
* collateral_to_auction * ( fliquidation * ( outstanding_kit - expected_kit_from_auctions ) ) / collateral
*
* If the burrow has no collateral left in it ( e.g. , right after a successful
* Complete - liquidation ) then we have two cases :
* ( a ) If the outstanding kit is non - zero then there is no way for this
* liquidation to be considered unwarranted . outstanding_kit is infinitely
* many times greater than the collateral .
* ( b ) If the outstanding kit is also zero then the liquidation in question
* should n't have happened ( so it is by definition unwarranted ) . I think
* that this is impossible in practice , but it 's probably best to account
* for it so that the function is not partial .
* liquidation unwarranted, calculated as (see
* docs/burrow-state-liquidations.md for the derivation of this formula):
*
* collateral_to_auction * (fliquidation * (outstanding_kit - expected_kit_from_auctions)) / collateral
*
* If the burrow has no collateral left in it (e.g., right after a successful
* Complete-liquidation) then we have two cases:
* (a) If the outstanding kit is non-zero then there is no way for this
* liquidation to be considered unwarranted. outstanding_kit is infinitely
* many times greater than the collateral.
* (b) If the outstanding kit is also zero then the liquidation in question
* shouldn't have happened (so it is by definition unwarranted). I think
* that this is impossible in practice, but it's probably best to account
* for it so that the function is not partial.
*)
let[@inline] compute_min_kit_for_unwarranted (p: parameters) (b: burrow) (collateral_to_auction: tok) : kit option =
assert (p.last_touched = b.last_checker_timestamp);
NOTE : division by zero .
then
if not (eq_kit_kit b.outstanding_kit (kit_of_denomination (Ligo.nat_from_literal "0n")))
( b ): zero
else
let { num = num_fl; den = den_fl; } = fliquidation in
let { num = num_ek; den = den_ek; } = compute_expected_kit p b.collateral_at_auction in
numerator = max 0 ( collateral_to_auction * num_fl * ( den_ek * outstanding_kit - kit_sf * ) )
let numerator =
let numerator =
Ligo.mul_int_int
(Ligo.mul_nat_int (tok_to_denomination_nat collateral_to_auction) num_fl)
(Ligo.sub_int_int
(Ligo.mul_int_nat den_ek (kit_to_denomination_nat b.outstanding_kit))
(Ligo.mul_int_int kit_scaling_factor_int num_ek)
) in
max_int (Ligo.int_from_literal "0") numerator in
let denominator =
Ligo.mul_int_int
(Ligo.mul_nat_int (tok_to_denomination_nat b.collateral) den_fl)
(Ligo.mul_int_int kit_scaling_factor_int den_ek) in
let burrow_request_liquidation (p: parameters) (b: burrow) : liquidation_result =
let b = burrow_touch p b in
let partial_reward =
let { num = num_lrp; den = den_lrp; } = liquidation_reward_percentage in
tok_of_fraction_floor
(Ligo.mul_nat_int (tok_to_denomination_nat b.collateral) num_lrp)
(Ligo.mul_int_int tok_scaling_factor_int den_lrp)
in
if not (burrow_is_liquidatable p b) then
(None : liquidation_result)
else
let liquidation_reward = tok_add creation_deposit partial_reward in
if lt_tok_tok (tok_sub b.collateral partial_reward) creation_deposit then
let collateral_to_auction = tok_sub b.collateral partial_reward in
let final_burrow =
{ b with
active = false;
collateral = tok_zero;
collateral_at_auction = tok_add b.collateral_at_auction collateral_to_auction;
} in
Some
( Close,
{ liquidation_reward = liquidation_reward;
collateral_to_auction = collateral_to_auction;
burrow_state = final_burrow; }
)
else
Case 2b : We can replenish the creation deposit . Now we got ta see if it 's
* possible to liquidate the burrow partially or if we have to do so
* completely ( deplete the collateral ) .
* possible to liquidate the burrow partially or if we have to do so
* completely (deplete the collateral). *)
let b_without_reward = { b with collateral = tok_sub (tok_sub b.collateral partial_reward) creation_deposit } in
let collateral_to_auction = compute_collateral_to_auction p b_without_reward in
assert (Ligo.gt_int_int collateral_to_auction (Ligo.int_from_literal "0"));
if Ligo.gt_int_int collateral_to_auction (tok_to_denomination_int b_without_reward.collateral) then
Case 2b.1 : With the current price it 's impossible to make the burrow
* not undercollateralized ; pay the liquidation reward , stash away the
* creation deposit , and liquidate all the remaining collateral , even if
* it is not expected to repay enough kit .
* not undercollateralized; pay the liquidation reward, stash away the
* creation deposit, and liquidate all the remaining collateral, even if
* it is not expected to repay enough kit. *)
let final_burrow =
{ b with
collateral = tok_zero;
collateral_at_auction = tok_add b.collateral_at_auction collateral_to_auction;
} in
Some
( Complete,
{ liquidation_reward = liquidation_reward;
collateral_to_auction = collateral_to_auction;
burrow_state = final_burrow; }
)
else
Case 2b.2 : Recovery is possible ; pay the liquidation reward , stash away the
* creation deposit , and liquidate the collateral needed to underburrow
* the burrow ( assuming that the past auctions will be successful but
* warranted , and that the liquidation we are performing will also be
* deemed warranted ) . If --- when the auction is over --- we realize that the
* liquidation was not really warranted , we shall return the auction
* earnings in their entirety . If not , then only 90 % of the earnings
* shall be returned .
* creation deposit, and liquidate the collateral needed to underburrow
* the burrow (assuming that the past auctions will be successful but
* warranted, and that the liquidation we are performing will also be
* deemed warranted). If---when the auction is over---we realize that the
* liquidation was not really warranted, we shall return the auction
* earnings in their entirety. If not, then only 90% of the earnings
* shall be returned. *)
let collateral_to_auction = match Ligo.is_nat collateral_to_auction with
| Some collateral -> tok_of_denomination collateral
| None -> (Ligo.failwith internalError_ComputeTezToAuctionNegativeResult : tok)
[@coverage off]
in
let final_burrow =
{ b with
collateral = tok_sub b_without_reward.collateral collateral_to_auction;
collateral_at_auction = tok_add b.collateral_at_auction collateral_to_auction;
} in
Some
( Partial,
{ liquidation_reward = liquidation_reward;
collateral_to_auction = collateral_to_auction;
burrow_state = final_burrow; }
)
[@@@coverage off]
let burrow_collateral (b: burrow) : tok =
b.collateral
let burrow_active (b: burrow) : bool =
b.active
let make_burrow_for_test
~active
~address
~collateral
~outstanding_kit
~adjustment_index
~collateral_at_auction
~last_checker_timestamp =
{
address = address;
active = active;
collateral = collateral;
outstanding_kit = outstanding_kit;
adjustment_index = adjustment_index;
collateral_at_auction = collateral_at_auction;
last_checker_timestamp = last_checker_timestamp;
}
let burrow_is_optimistically_overburrowed (p: parameters) (b: burrow) : bool =
let { num = num_fm; den = den_fm; } = fminting in
let { num = num_mp; den = den_mp; } = minting_price p in
let { num = num_ek; den = den_ek; } = compute_expected_kit p b.collateral_at_auction in
lhs = collateral * den_fm * kit_sf * den_ek * den_mp
let lhs =
Ligo.mul_nat_int
(tok_to_denomination_nat b.collateral)
(Ligo.mul_int_int
(Ligo.mul_int_int den_fm kit_scaling_factor_int)
(Ligo.mul_int_int den_ek den_mp)
) in
rhs = num_fm * ( kit_outstanding * den_ek - kit_sf * num_ek ) * num_mp * tez_sf
let rhs =
Ligo.mul_int_int
num_fm
(Ligo.mul_int_int
(Ligo.sub_int_int
(Ligo.mul_nat_int (kit_to_denomination_nat b.outstanding_kit) den_ek)
(Ligo.mul_int_int kit_scaling_factor_int num_ek)
)
(Ligo.mul_int_int num_mp tok_scaling_factor_int)
) in
Ligo.lt_int_int lhs rhs
let burrow_outstanding_kit (b: burrow) : kit = b.outstanding_kit
[@@@coverage on]
|
c3514c310602a461500a4dcd64d7a25a3115a9a6e01aea9089cdbb860d2f5cb9 | abyala/advent-2021-clojure | day09_test.clj | (ns advent-2021-clojure.day09-test
(:require [clojure.test :refer :all]
[advent-2021-clojure.day09 :refer :all]))
(def test-input "2199943210\n3987894921\n9856789892\n8767896789\n9899965678")
(def puzzle-input (slurp "resources/day09_data.txt"))
(deftest part1-test
(are [expected input] (= expected (part1 input))
15 test-input
444 puzzle-input))
(deftest part2-test
(are [expected input] (= expected (part2 input))
1134 test-input
1168440 puzzle-input)) | null | https://raw.githubusercontent.com/abyala/advent-2021-clojure/5e01dec4963cb87a738d389893674eeb13aea42b/test/advent_2021_clojure/day09_test.clj | clojure | (ns advent-2021-clojure.day09-test
(:require [clojure.test :refer :all]
[advent-2021-clojure.day09 :refer :all]))
(def test-input "2199943210\n3987894921\n9856789892\n8767896789\n9899965678")
(def puzzle-input (slurp "resources/day09_data.txt"))
(deftest part1-test
(are [expected input] (= expected (part1 input))
15 test-input
444 puzzle-input))
(deftest part2-test
(are [expected input] (= expected (part2 input))
1134 test-input
1168440 puzzle-input)) |
|
d4c52eceba9e30dc67ff09b1b8981a9856b4ba331e923c53bfac948fc67db2c5 | helins/wasm.cljc | decompile.clj | Decompiling and pretty printing a WASM program such as " my_program.wasm " :
;;
;; $ bb decompile.clj my_program.wasm
;;
Following WASM specs 1.1 , compatible with > = 0.3.5
(require '[babashka.deps :as deps])
(deps/add-deps '{:deps {io.helins/wasm {:mvn/version "0.0.0-alpha2"}}})
(require 'clojure.pprint
'[helins.wasm :as wasm])
(-> *command-line-args*
first
wasm/decompile-file
clojure.pprint/pprint)
| null | https://raw.githubusercontent.com/helins/wasm.cljc/bef1393899089763e0114aefbaec6303838ec7a0/src/babashka/decompile.clj | clojure |
$ bb decompile.clj my_program.wasm
| Decompiling and pretty printing a WASM program such as " my_program.wasm " :
Following WASM specs 1.1 , compatible with > = 0.3.5
(require '[babashka.deps :as deps])
(deps/add-deps '{:deps {io.helins/wasm {:mvn/version "0.0.0-alpha2"}}})
(require 'clojure.pprint
'[helins.wasm :as wasm])
(-> *command-line-args*
first
wasm/decompile-file
clojure.pprint/pprint)
|
02b19ee4387a2954b4140385a8ad7be953d77bdbe930d0d13a4aa4db5cead401 | cunger/pythia | utils.clj | (ns setup.utils
(:require [clojure.java.io :as io]
[clojure.set :as set]))
;; IO
(defn files-in [dir-name]
(rest (file-seq (io/file dir-name))))
(defn file-name-matches? [re file]
(not (nil? (re-find re (.getName file)))))
(defn strip-src [file-name]
(clojure.string/replace file-name "src/" ""))
(defn copy-in [dir file]
(io/file (str dir (.getName file))))
(defn copy-to [file folder]
(io/copy file (copy-in folder file)))
(defn clean [folder]
(doseq [f (files-in folder)]
(io/delete-file f)))
;; LIST
(defn init [ls] (take (- (count ls) 1) ls))
(defn minus [ls1 ls2] (vec (set/difference (set ls1) (set ls2))))
(defn overlap [ls1 ls2] (vec (set/intersection (set ls1) (set ls2))))
(defn overlap? [ls1 ls2] (not (empty? (overlap ls1 ls2))))
(defn elem [e ls] (some #{e} ls))
;; ARRAY
(defn fmap-vals [f m]
(into {} (for [[k v] m] [k (f v)])))
(defn fmap-keys [f m]
(into {} (for [[k v] m] [(f k) v])))
(defn remove-nil-values [m]
(apply dissoc m (filter (fn [k] (nil? (k m))) (keys m))))
(defn conjoin-values [x y]
(let [conjoined (distinct
(let [x-a-coll (coll? x)
y-a-coll (coll? y)]
(cond (and x-a-coll (not y-a-coll)) (cons y x)
(and y-a-coll (not x-a-coll)) (cons x y)
(and x-a-coll y-a-coll) (concat x y)
(and (not x-a-coll) (not y-a-coll)) [x y])))]
(if (= (count conjoined) 1) (first conjoined) conjoined)))
(defn apply-to-conjoined [f x]
(if (sequential? x) (map f x) (f x)))
(defn find-equivalence-classes [xs bool-func]
returns a list of list of x 's ( where each list is an equivalence class , i.e. ( func x1 x2 ) for any two elements is true )
(reduce (fn [equivalence-classes x]
(loop [done []
todo equivalence-classes]
(if (empty? todo)
(cons [x] done) ; case where no equivalence class for x was found
(let [c (first todo)]
(if (bool-func x (first c))
(concat (cons (cons x c) done) (rest todo)) ; case where c is an equivalence for x
(recur (cons c done) (rest todo))))))) ; case where c is not an equivalence class for x
[]
xs))
(defn differ-only-in [array1 array2 ks]
(every? (fn [k] (or (= (get array1 k) (get array2 k))
(elem k ks)))
(concat (keys array1) (keys array2))))
(defn collapse [arrays]
(apply (partial merge-with conjoin-values) arrays))
(defn collapse-bindings [arrays keys-to-collapse]
; collapses all arrays that differ at most in values for keys-to-collapse
(map collapse (find-equivalence-classes arrays (fn [a1 a2] (differ-only-in a1 a2 keys-to-collapse)))))
;; STRING
(defn words [string] (clojure.string/split string #"\s"))
(defn contains-string? [string substring]
(not (nil? (re-matches (re-pattern (str ".*" substring ".*")) string))))
| null | https://raw.githubusercontent.com/cunger/pythia/f58e35395968d4c46aef495fd363c26b1102003c/src/setup/utils.clj | clojure | IO
LIST
ARRAY
case where no equivalence class for x was found
case where c is an equivalence for x
case where c is not an equivalence class for x
collapses all arrays that differ at most in values for keys-to-collapse
STRING | (ns setup.utils
(:require [clojure.java.io :as io]
[clojure.set :as set]))
(defn files-in [dir-name]
(rest (file-seq (io/file dir-name))))
(defn file-name-matches? [re file]
(not (nil? (re-find re (.getName file)))))
(defn strip-src [file-name]
(clojure.string/replace file-name "src/" ""))
(defn copy-in [dir file]
(io/file (str dir (.getName file))))
(defn copy-to [file folder]
(io/copy file (copy-in folder file)))
(defn clean [folder]
(doseq [f (files-in folder)]
(io/delete-file f)))
(defn init [ls] (take (- (count ls) 1) ls))
(defn minus [ls1 ls2] (vec (set/difference (set ls1) (set ls2))))
(defn overlap [ls1 ls2] (vec (set/intersection (set ls1) (set ls2))))
(defn overlap? [ls1 ls2] (not (empty? (overlap ls1 ls2))))
(defn elem [e ls] (some #{e} ls))
(defn fmap-vals [f m]
(into {} (for [[k v] m] [k (f v)])))
(defn fmap-keys [f m]
(into {} (for [[k v] m] [(f k) v])))
(defn remove-nil-values [m]
(apply dissoc m (filter (fn [k] (nil? (k m))) (keys m))))
(defn conjoin-values [x y]
(let [conjoined (distinct
(let [x-a-coll (coll? x)
y-a-coll (coll? y)]
(cond (and x-a-coll (not y-a-coll)) (cons y x)
(and y-a-coll (not x-a-coll)) (cons x y)
(and x-a-coll y-a-coll) (concat x y)
(and (not x-a-coll) (not y-a-coll)) [x y])))]
(if (= (count conjoined) 1) (first conjoined) conjoined)))
(defn apply-to-conjoined [f x]
(if (sequential? x) (map f x) (f x)))
(defn find-equivalence-classes [xs bool-func]
returns a list of list of x 's ( where each list is an equivalence class , i.e. ( func x1 x2 ) for any two elements is true )
(reduce (fn [equivalence-classes x]
(loop [done []
todo equivalence-classes]
(if (empty? todo)
(let [c (first todo)]
(if (bool-func x (first c))
[]
xs))
(defn differ-only-in [array1 array2 ks]
(every? (fn [k] (or (= (get array1 k) (get array2 k))
(elem k ks)))
(concat (keys array1) (keys array2))))
(defn collapse [arrays]
(apply (partial merge-with conjoin-values) arrays))
(defn collapse-bindings [arrays keys-to-collapse]
(map collapse (find-equivalence-classes arrays (fn [a1 a2] (differ-only-in a1 a2 keys-to-collapse)))))
(defn words [string] (clojure.string/split string #"\s"))
(defn contains-string? [string substring]
(not (nil? (re-matches (re-pattern (str ".*" substring ".*")) string))))
|
229eeb0e49f6e78b8ff201188426bc365081e9664fcb3391fe83878f5a882bbf | VincentToups/racket-lib | lofi-graphics.rkt | #lang racket
(require racket/gui
racket/class
racket/dict
utilities/sprite-loading)
(define character-sheet "./oryx-lofi/x4-lofi_char.png")
(define character-sheet-type 'png/alpha)
(define x4-characters (make-hash))
(dict-set! x4-characters 'normal-0-0
(load-image character-sheet character-sheet-type 0 0 1 1 32 32))
(dict-set! x4-characters 'human-ranger (dict-ref x4-characters 'normal-0-0))
(dict-set! x4-characters 'normal-0-1
(load-image character-sheet character-sheet-type 0 1 1 1 32 32))
(dict-set! x4-characters 'human-soldier (dict-ref x4-characters 'normal-0-1))
(dict-set! x4-characters 'normal-0-2
(load-image character-sheet character-sheet-type 0 2 1 1 32 32))
(dict-set! x4-characters 'human-mage (dict-ref x4-characters 'normal-0-2))
(dict-set! x4-characters 'normal-0-3
(load-image character-sheet character-sheet-type 0 3 1 1 32 32))
(dict-set! x4-characters 'human-rogue (dict-ref x4-characters 'normal-0-3))
(dict-set! x4-characters 'normal-0-4
(load-image character-sheet character-sheet-type 0 4 1 1 32 32))
(dict-set! x4-characters 'human-red-mage (dict-ref x4-characters 'normal-0-4))
(dict-set! x4-characters 'normal-0-5
(load-image character-sheet character-sheet-type 0 5 1 1 32 32))
(dict-set! x4-characters 'human-paladin (dict-ref x4-characters 'normal-0-5))
(dict-set! x4-characters 'normal-0-6
(load-image character-sheet character-sheet-type 0 6 1 1 32 32))
(dict-set! x4-characters 'normal-0-7
(load-image character-sheet character-sheet-type 0 7 1 1 32 32))
(dict-set! x4-characters 'normal-0-8
(load-image character-sheet character-sheet-type 0 8 1 1 32 32))
(dict-set! x4-characters 'normal-0-9
(load-image character-sheet character-sheet-type 0 9 1 1 32 32))
(dict-set! x4-characters 'normal-0-10
(load-image character-sheet character-sheet-type 0 10 1 1 32 32))
(dict-set! x4-characters 'normal-0-11
(load-image character-sheet character-sheet-type 0 11 1 1 32 32))
(dict-set! x4-characters 'normal-0-12
(load-image character-sheet character-sheet-type 0 12 1 1 32 32))
(dict-set! x4-characters 'normal-0-13
(load-image character-sheet character-sheet-type 0 13 1 1 32 32))
(dict-set! x4-characters 'normal-0-14
(load-image character-sheet character-sheet-type 0 14 1 1 32 32))
(dict-set! x4-characters 'normal-1-0
(load-image character-sheet character-sheet-type 1 0 1 1 32 32))
(dict-set! x4-characters 'normal-1-1
(load-image character-sheet character-sheet-type 1 1 1 1 32 32))
(dict-set! x4-characters 'normal-1-2
(load-image character-sheet character-sheet-type 1 2 1 1 32 32))
(dict-set! x4-characters 'normal-1-3
(load-image character-sheet character-sheet-type 1 3 1 1 32 32))
(dict-set! x4-characters 'normal-1-4
(load-image character-sheet character-sheet-type 1 4 1 1 32 32))
(dict-set! x4-characters 'normal-1-5
(load-image character-sheet character-sheet-type 1 5 1 1 32 32))
(dict-set! x4-characters 'normal-1-6
(load-image character-sheet character-sheet-type 1 6 1 1 32 32))
(dict-set! x4-characters 'normal-1-7
(load-image character-sheet character-sheet-type 1 7 1 1 32 32))
(dict-set! x4-characters 'normal-1-8
(load-image character-sheet character-sheet-type 1 8 1 1 32 32))
(dict-set! x4-characters 'normal-1-9
(load-image character-sheet character-sheet-type 1 9 1 1 32 32))
(dict-set! x4-characters 'normal-1-10
(load-image character-sheet character-sheet-type 1 10 1 1 32 32))
(dict-set! x4-characters 'normal-1-11
(load-image character-sheet character-sheet-type 1 11 1 1 32 32))
(dict-set! x4-characters 'normal-1-12
(load-image character-sheet character-sheet-type 1 12 1 1 32 32))
(dict-set! x4-characters 'normal-1-13
(load-image character-sheet character-sheet-type 1 13 1 1 32 32))
(dict-set! x4-characters 'normal-1-14
(load-image character-sheet character-sheet-type 1 14 1 1 32 32))
(dict-set! x4-characters 'normal-2-0
(load-image character-sheet character-sheet-type 2 0 1 1 32 32))
(dict-set! x4-characters 'normal-2-1
(load-image character-sheet character-sheet-type 2 1 1 1 32 32))
(dict-set! x4-characters 'normal-2-2
(load-image character-sheet character-sheet-type 2 2 1 1 32 32))
(dict-set! x4-characters 'normal-2-3
(load-image character-sheet character-sheet-type 2 3 1 1 32 32))
(dict-set! x4-characters 'normal-2-4
(load-image character-sheet character-sheet-type 2 4 1 1 32 32))
(dict-set! x4-characters 'normal-2-5
(load-image character-sheet character-sheet-type 2 5 1 1 32 32))
(dict-set! x4-characters 'normal-2-6
(load-image character-sheet character-sheet-type 2 6 1 1 32 32))
(dict-set! x4-characters 'normal-2-7
(load-image character-sheet character-sheet-type 2 7 1 1 32 32))
(dict-set! x4-characters 'normal-2-8
(load-image character-sheet character-sheet-type 2 8 1 1 32 32))
(dict-set! x4-characters 'normal-2-9
(load-image character-sheet character-sheet-type 2 9 1 1 32 32))
(dict-set! x4-characters 'normal-2-10
(load-image character-sheet character-sheet-type 2 10 1 1 32 32))
(dict-set! x4-characters 'normal-2-11
(load-image character-sheet character-sheet-type 2 11 1 1 32 32))
(dict-set! x4-characters 'normal-2-12
(load-image character-sheet character-sheet-type 2 12 1 1 32 32))
(dict-set! x4-characters 'normal-2-13
(load-image character-sheet character-sheet-type 2 13 1 1 32 32))
(dict-set! x4-characters 'normal-2-14
(load-image character-sheet character-sheet-type 2 14 1 1 32 32))
(dict-set! x4-characters 'normal-3-0
(load-image character-sheet character-sheet-type 3 0 1 1 32 32))
(dict-set! x4-characters 'normal-3-1
(load-image character-sheet character-sheet-type 3 1 1 1 32 32))
(dict-set! x4-characters 'normal-3-2
(load-image character-sheet character-sheet-type 3 2 1 1 32 32))
(dict-set! x4-characters 'normal-3-3
(load-image character-sheet character-sheet-type 3 3 1 1 32 32))
;; Register the "normal" character tiles in x4-characters.
;;
;; Each key 'normal-ROW-COL maps to a 1x1-cell, 32x32-pixel image cut
;; from the character sheet at grid position (ROW, COL).  This replaces
;; several hundred hand-written dict-set! forms with one loop that
;; produces the same keys, the same load-image calls, and the same
;; insertion order (row-major, columns ascending).
;;
;; Rows covered here: row 3 from column 4 onward, then rows 4-15 in
;; full (columns 0-14).  Columns 0-3 of row 3 are registered earlier
;; in the file, so they are excluded to avoid redundant reloads.
(for* ([row (in-range 3 16)]
       [col (in-range 0 15)]
       #:unless (and (= row 3) (< col 4)))
  ;; string->symbol/format reproduces the original literal symbols,
  ;; e.g. (format "normal-~a-~a" 3 4) -> 'normal-3-4.
  (dict-set! x4-characters
             (string->symbol (format "normal-~a-~a" row col))
             (load-image character-sheet character-sheet-type row col 1 1 32 32)))
| null | https://raw.githubusercontent.com/VincentToups/racket-lib/d8aed0959fd148615b000ceecd7b8a6128cfcfa8/roguelikes/lofi-graphics.rkt | racket | #lang racket
(require racket/gui
racket/class
racket/dict
utilities/sprite-loading)
(define character-sheet "./oryx-lofi/x4-lofi_char.png")
(define character-sheet-type 'png/alpha)
(define x4-characters (make-hash))
(dict-set! x4-characters 'normal-0-0
(load-image character-sheet character-sheet-type 0 0 1 1 32 32))
(dict-set! x4-characters 'human-ranger (dict-ref x4-characters 'normal-0-0))
(dict-set! x4-characters 'normal-0-1
(load-image character-sheet character-sheet-type 0 1 1 1 32 32))
(dict-set! x4-characters 'human-soldier (dict-ref x4-characters 'normal-0-1))
(dict-set! x4-characters 'normal-0-2
(load-image character-sheet character-sheet-type 0 2 1 1 32 32))
(dict-set! x4-characters 'human-mage (dict-ref x4-characters 'normal-0-2))
(dict-set! x4-characters 'normal-0-3
(load-image character-sheet character-sheet-type 0 3 1 1 32 32))
(dict-set! x4-characters 'human-rogue (dict-ref x4-characters 'normal-0-3))
(dict-set! x4-characters 'normal-0-4
(load-image character-sheet character-sheet-type 0 4 1 1 32 32))
(dict-set! x4-characters 'human-red-mage (dict-ref x4-characters 'normal-0-4))
(dict-set! x4-characters 'normal-0-5
(load-image character-sheet character-sheet-type 0 5 1 1 32 32))
(dict-set! x4-characters 'human-paladin (dict-ref x4-characters 'normal-0-5))
(dict-set! x4-characters 'normal-0-6
(load-image character-sheet character-sheet-type 0 6 1 1 32 32))
(dict-set! x4-characters 'normal-0-7
(load-image character-sheet character-sheet-type 0 7 1 1 32 32))
(dict-set! x4-characters 'normal-0-8
(load-image character-sheet character-sheet-type 0 8 1 1 32 32))
(dict-set! x4-characters 'normal-0-9
(load-image character-sheet character-sheet-type 0 9 1 1 32 32))
(dict-set! x4-characters 'normal-0-10
(load-image character-sheet character-sheet-type 0 10 1 1 32 32))
(dict-set! x4-characters 'normal-0-11
(load-image character-sheet character-sheet-type 0 11 1 1 32 32))
(dict-set! x4-characters 'normal-0-12
(load-image character-sheet character-sheet-type 0 12 1 1 32 32))
(dict-set! x4-characters 'normal-0-13
(load-image character-sheet character-sheet-type 0 13 1 1 32 32))
(dict-set! x4-characters 'normal-0-14
(load-image character-sheet character-sheet-type 0 14 1 1 32 32))
(dict-set! x4-characters 'normal-1-0
(load-image character-sheet character-sheet-type 1 0 1 1 32 32))
(dict-set! x4-characters 'normal-1-1
(load-image character-sheet character-sheet-type 1 1 1 1 32 32))
(dict-set! x4-characters 'normal-1-2
(load-image character-sheet character-sheet-type 1 2 1 1 32 32))
(dict-set! x4-characters 'normal-1-3
(load-image character-sheet character-sheet-type 1 3 1 1 32 32))
(dict-set! x4-characters 'normal-1-4
(load-image character-sheet character-sheet-type 1 4 1 1 32 32))
(dict-set! x4-characters 'normal-1-5
(load-image character-sheet character-sheet-type 1 5 1 1 32 32))
(dict-set! x4-characters 'normal-1-6
(load-image character-sheet character-sheet-type 1 6 1 1 32 32))
(dict-set! x4-characters 'normal-1-7
(load-image character-sheet character-sheet-type 1 7 1 1 32 32))
(dict-set! x4-characters 'normal-1-8
(load-image character-sheet character-sheet-type 1 8 1 1 32 32))
(dict-set! x4-characters 'normal-1-9
(load-image character-sheet character-sheet-type 1 9 1 1 32 32))
(dict-set! x4-characters 'normal-1-10
(load-image character-sheet character-sheet-type 1 10 1 1 32 32))
(dict-set! x4-characters 'normal-1-11
(load-image character-sheet character-sheet-type 1 11 1 1 32 32))
(dict-set! x4-characters 'normal-1-12
(load-image character-sheet character-sheet-type 1 12 1 1 32 32))
(dict-set! x4-characters 'normal-1-13
(load-image character-sheet character-sheet-type 1 13 1 1 32 32))
(dict-set! x4-characters 'normal-1-14
(load-image character-sheet character-sheet-type 1 14 1 1 32 32))
(dict-set! x4-characters 'normal-2-0
(load-image character-sheet character-sheet-type 2 0 1 1 32 32))
(dict-set! x4-characters 'normal-2-1
(load-image character-sheet character-sheet-type 2 1 1 1 32 32))
(dict-set! x4-characters 'normal-2-2
(load-image character-sheet character-sheet-type 2 2 1 1 32 32))
(dict-set! x4-characters 'normal-2-3
(load-image character-sheet character-sheet-type 2 3 1 1 32 32))
(dict-set! x4-characters 'normal-2-4
(load-image character-sheet character-sheet-type 2 4 1 1 32 32))
(dict-set! x4-characters 'normal-2-5
(load-image character-sheet character-sheet-type 2 5 1 1 32 32))
(dict-set! x4-characters 'normal-2-6
(load-image character-sheet character-sheet-type 2 6 1 1 32 32))
(dict-set! x4-characters 'normal-2-7
(load-image character-sheet character-sheet-type 2 7 1 1 32 32))
(dict-set! x4-characters 'normal-2-8
(load-image character-sheet character-sheet-type 2 8 1 1 32 32))
(dict-set! x4-characters 'normal-2-9
(load-image character-sheet character-sheet-type 2 9 1 1 32 32))
(dict-set! x4-characters 'normal-2-10
(load-image character-sheet character-sheet-type 2 10 1 1 32 32))
(dict-set! x4-characters 'normal-2-11
(load-image character-sheet character-sheet-type 2 11 1 1 32 32))
(dict-set! x4-characters 'normal-2-12
(load-image character-sheet character-sheet-type 2 12 1 1 32 32))
(dict-set! x4-characters 'normal-2-13
(load-image character-sheet character-sheet-type 2 13 1 1 32 32))
(dict-set! x4-characters 'normal-2-14
(load-image character-sheet character-sheet-type 2 14 1 1 32 32))
(dict-set! x4-characters 'normal-3-0
(load-image character-sheet character-sheet-type 3 0 1 1 32 32))
(dict-set! x4-characters 'normal-3-1
(load-image character-sheet character-sheet-type 3 1 1 1 32 32))
(dict-set! x4-characters 'normal-3-2
(load-image character-sheet character-sheet-type 3 2 1 1 32 32))
(dict-set! x4-characters 'normal-3-3
(load-image character-sheet character-sheet-type 3 3 1 1 32 32))
(dict-set! x4-characters 'normal-3-4
(load-image character-sheet character-sheet-type 3 4 1 1 32 32))
(dict-set! x4-characters 'normal-3-5
(load-image character-sheet character-sheet-type 3 5 1 1 32 32))
(dict-set! x4-characters 'normal-3-6
(load-image character-sheet character-sheet-type 3 6 1 1 32 32))
(dict-set! x4-characters 'normal-3-7
(load-image character-sheet character-sheet-type 3 7 1 1 32 32))
(dict-set! x4-characters 'normal-3-8
(load-image character-sheet character-sheet-type 3 8 1 1 32 32))
(dict-set! x4-characters 'normal-3-9
(load-image character-sheet character-sheet-type 3 9 1 1 32 32))
(dict-set! x4-characters 'normal-3-10
(load-image character-sheet character-sheet-type 3 10 1 1 32 32))
(dict-set! x4-characters 'normal-3-11
(load-image character-sheet character-sheet-type 3 11 1 1 32 32))
(dict-set! x4-characters 'normal-3-12
(load-image character-sheet character-sheet-type 3 12 1 1 32 32))
(dict-set! x4-characters 'normal-3-13
(load-image character-sheet character-sheet-type 3 13 1 1 32 32))
(dict-set! x4-characters 'normal-3-14
(load-image character-sheet character-sheet-type 3 14 1 1 32 32))
(dict-set! x4-characters 'normal-4-0
(load-image character-sheet character-sheet-type 4 0 1 1 32 32))
(dict-set! x4-characters 'normal-4-1
(load-image character-sheet character-sheet-type 4 1 1 1 32 32))
(dict-set! x4-characters 'normal-4-2
(load-image character-sheet character-sheet-type 4 2 1 1 32 32))
(dict-set! x4-characters 'normal-4-3
(load-image character-sheet character-sheet-type 4 3 1 1 32 32))
(dict-set! x4-characters 'normal-4-4
(load-image character-sheet character-sheet-type 4 4 1 1 32 32))
(dict-set! x4-characters 'normal-4-5
(load-image character-sheet character-sheet-type 4 5 1 1 32 32))
(dict-set! x4-characters 'normal-4-6
(load-image character-sheet character-sheet-type 4 6 1 1 32 32))
(dict-set! x4-characters 'normal-4-7
(load-image character-sheet character-sheet-type 4 7 1 1 32 32))
(dict-set! x4-characters 'normal-4-8
(load-image character-sheet character-sheet-type 4 8 1 1 32 32))
(dict-set! x4-characters 'normal-4-9
(load-image character-sheet character-sheet-type 4 9 1 1 32 32))
(dict-set! x4-characters 'normal-4-10
(load-image character-sheet character-sheet-type 4 10 1 1 32 32))
(dict-set! x4-characters 'normal-4-11
(load-image character-sheet character-sheet-type 4 11 1 1 32 32))
(dict-set! x4-characters 'normal-4-12
(load-image character-sheet character-sheet-type 4 12 1 1 32 32))
(dict-set! x4-characters 'normal-4-13
(load-image character-sheet character-sheet-type 4 13 1 1 32 32))
(dict-set! x4-characters 'normal-4-14
(load-image character-sheet character-sheet-type 4 14 1 1 32 32))
(dict-set! x4-characters 'normal-5-0
(load-image character-sheet character-sheet-type 5 0 1 1 32 32))
(dict-set! x4-characters 'normal-5-1
(load-image character-sheet character-sheet-type 5 1 1 1 32 32))
(dict-set! x4-characters 'normal-5-2
(load-image character-sheet character-sheet-type 5 2 1 1 32 32))
(dict-set! x4-characters 'normal-5-3
(load-image character-sheet character-sheet-type 5 3 1 1 32 32))
(dict-set! x4-characters 'normal-5-4
(load-image character-sheet character-sheet-type 5 4 1 1 32 32))
(dict-set! x4-characters 'normal-5-5
(load-image character-sheet character-sheet-type 5 5 1 1 32 32))
(dict-set! x4-characters 'normal-5-6
(load-image character-sheet character-sheet-type 5 6 1 1 32 32))
(dict-set! x4-characters 'normal-5-7
(load-image character-sheet character-sheet-type 5 7 1 1 32 32))
(dict-set! x4-characters 'normal-5-8
(load-image character-sheet character-sheet-type 5 8 1 1 32 32))
(dict-set! x4-characters 'normal-5-9
(load-image character-sheet character-sheet-type 5 9 1 1 32 32))
(dict-set! x4-characters 'normal-5-10
(load-image character-sheet character-sheet-type 5 10 1 1 32 32))
(dict-set! x4-characters 'normal-5-11
(load-image character-sheet character-sheet-type 5 11 1 1 32 32))
(dict-set! x4-characters 'normal-5-12
(load-image character-sheet character-sheet-type 5 12 1 1 32 32))
(dict-set! x4-characters 'normal-5-13
(load-image character-sheet character-sheet-type 5 13 1 1 32 32))
(dict-set! x4-characters 'normal-5-14
(load-image character-sheet character-sheet-type 5 14 1 1 32 32))
(dict-set! x4-characters 'normal-6-0
(load-image character-sheet character-sheet-type 6 0 1 1 32 32))
(dict-set! x4-characters 'normal-6-1
(load-image character-sheet character-sheet-type 6 1 1 1 32 32))
(dict-set! x4-characters 'normal-6-2
(load-image character-sheet character-sheet-type 6 2 1 1 32 32))
(dict-set! x4-characters 'normal-6-3
(load-image character-sheet character-sheet-type 6 3 1 1 32 32))
(dict-set! x4-characters 'normal-6-4
(load-image character-sheet character-sheet-type 6 4 1 1 32 32))
(dict-set! x4-characters 'normal-6-5
(load-image character-sheet character-sheet-type 6 5 1 1 32 32))
(dict-set! x4-characters 'normal-6-6
(load-image character-sheet character-sheet-type 6 6 1 1 32 32))
(dict-set! x4-characters 'normal-6-7
(load-image character-sheet character-sheet-type 6 7 1 1 32 32))
(dict-set! x4-characters 'normal-6-8
(load-image character-sheet character-sheet-type 6 8 1 1 32 32))
(dict-set! x4-characters 'normal-6-9
(load-image character-sheet character-sheet-type 6 9 1 1 32 32))
(dict-set! x4-characters 'normal-6-10
(load-image character-sheet character-sheet-type 6 10 1 1 32 32))
(dict-set! x4-characters 'normal-6-11
(load-image character-sheet character-sheet-type 6 11 1 1 32 32))
(dict-set! x4-characters 'normal-6-12
(load-image character-sheet character-sheet-type 6 12 1 1 32 32))
(dict-set! x4-characters 'normal-6-13
(load-image character-sheet character-sheet-type 6 13 1 1 32 32))
(dict-set! x4-characters 'normal-6-14
(load-image character-sheet character-sheet-type 6 14 1 1 32 32))
(dict-set! x4-characters 'normal-7-0
(load-image character-sheet character-sheet-type 7 0 1 1 32 32))
(dict-set! x4-characters 'normal-7-1
(load-image character-sheet character-sheet-type 7 1 1 1 32 32))
(dict-set! x4-characters 'normal-7-2
(load-image character-sheet character-sheet-type 7 2 1 1 32 32))
(dict-set! x4-characters 'normal-7-3
(load-image character-sheet character-sheet-type 7 3 1 1 32 32))
(dict-set! x4-characters 'normal-7-4
(load-image character-sheet character-sheet-type 7 4 1 1 32 32))
(dict-set! x4-characters 'normal-7-5
(load-image character-sheet character-sheet-type 7 5 1 1 32 32))
(dict-set! x4-characters 'normal-7-6
(load-image character-sheet character-sheet-type 7 6 1 1 32 32))
(dict-set! x4-characters 'normal-7-7
(load-image character-sheet character-sheet-type 7 7 1 1 32 32))
(dict-set! x4-characters 'normal-7-8
(load-image character-sheet character-sheet-type 7 8 1 1 32 32))
(dict-set! x4-characters 'normal-7-9
(load-image character-sheet character-sheet-type 7 9 1 1 32 32))
(dict-set! x4-characters 'normal-7-10
(load-image character-sheet character-sheet-type 7 10 1 1 32 32))
(dict-set! x4-characters 'normal-7-11
(load-image character-sheet character-sheet-type 7 11 1 1 32 32))
(dict-set! x4-characters 'normal-7-12
(load-image character-sheet character-sheet-type 7 12 1 1 32 32))
(dict-set! x4-characters 'normal-7-13
(load-image character-sheet character-sheet-type 7 13 1 1 32 32))
(dict-set! x4-characters 'normal-7-14
(load-image character-sheet character-sheet-type 7 14 1 1 32 32))
(dict-set! x4-characters 'normal-8-0
(load-image character-sheet character-sheet-type 8 0 1 1 32 32))
(dict-set! x4-characters 'normal-8-1
(load-image character-sheet character-sheet-type 8 1 1 1 32 32))
(dict-set! x4-characters 'normal-8-2
(load-image character-sheet character-sheet-type 8 2 1 1 32 32))
(dict-set! x4-characters 'normal-8-3
(load-image character-sheet character-sheet-type 8 3 1 1 32 32))
(dict-set! x4-characters 'normal-8-4
(load-image character-sheet character-sheet-type 8 4 1 1 32 32))
(dict-set! x4-characters 'normal-8-5
(load-image character-sheet character-sheet-type 8 5 1 1 32 32))
(dict-set! x4-characters 'normal-8-6
(load-image character-sheet character-sheet-type 8 6 1 1 32 32))
(dict-set! x4-characters 'normal-8-7
(load-image character-sheet character-sheet-type 8 7 1 1 32 32))
(dict-set! x4-characters 'normal-8-8
(load-image character-sheet character-sheet-type 8 8 1 1 32 32))
(dict-set! x4-characters 'normal-8-9
(load-image character-sheet character-sheet-type 8 9 1 1 32 32))
(dict-set! x4-characters 'normal-8-10
(load-image character-sheet character-sheet-type 8 10 1 1 32 32))
(dict-set! x4-characters 'normal-8-11
(load-image character-sheet character-sheet-type 8 11 1 1 32 32))
(dict-set! x4-characters 'normal-8-12
(load-image character-sheet character-sheet-type 8 12 1 1 32 32))
(dict-set! x4-characters 'normal-8-13
(load-image character-sheet character-sheet-type 8 13 1 1 32 32))
(dict-set! x4-characters 'normal-8-14
(load-image character-sheet character-sheet-type 8 14 1 1 32 32))
(dict-set! x4-characters 'normal-9-0
(load-image character-sheet character-sheet-type 9 0 1 1 32 32))
(dict-set! x4-characters 'normal-9-1
(load-image character-sheet character-sheet-type 9 1 1 1 32 32))
(dict-set! x4-characters 'normal-9-2
(load-image character-sheet character-sheet-type 9 2 1 1 32 32))
(dict-set! x4-characters 'normal-9-3
(load-image character-sheet character-sheet-type 9 3 1 1 32 32))
(dict-set! x4-characters 'normal-9-4
(load-image character-sheet character-sheet-type 9 4 1 1 32 32))
(dict-set! x4-characters 'normal-9-5
(load-image character-sheet character-sheet-type 9 5 1 1 32 32))
(dict-set! x4-characters 'normal-9-6
(load-image character-sheet character-sheet-type 9 6 1 1 32 32))
(dict-set! x4-characters 'normal-9-7
(load-image character-sheet character-sheet-type 9 7 1 1 32 32))
(dict-set! x4-characters 'normal-9-8
(load-image character-sheet character-sheet-type 9 8 1 1 32 32))
(dict-set! x4-characters 'normal-9-9
(load-image character-sheet character-sheet-type 9 9 1 1 32 32))
(dict-set! x4-characters 'normal-9-10
(load-image character-sheet character-sheet-type 9 10 1 1 32 32))
(dict-set! x4-characters 'normal-9-11
(load-image character-sheet character-sheet-type 9 11 1 1 32 32))
(dict-set! x4-characters 'normal-9-12
(load-image character-sheet character-sheet-type 9 12 1 1 32 32))
(dict-set! x4-characters 'normal-9-13
(load-image character-sheet character-sheet-type 9 13 1 1 32 32))
(dict-set! x4-characters 'normal-9-14
(load-image character-sheet character-sheet-type 9 14 1 1 32 32))
(dict-set! x4-characters 'normal-10-0
(load-image character-sheet character-sheet-type 10 0 1 1 32 32))
(dict-set! x4-characters 'normal-10-1
(load-image character-sheet character-sheet-type 10 1 1 1 32 32))
(dict-set! x4-characters 'normal-10-2
(load-image character-sheet character-sheet-type 10 2 1 1 32 32))
(dict-set! x4-characters 'normal-10-3
(load-image character-sheet character-sheet-type 10 3 1 1 32 32))
(dict-set! x4-characters 'normal-10-4
(load-image character-sheet character-sheet-type 10 4 1 1 32 32))
(dict-set! x4-characters 'normal-10-5
(load-image character-sheet character-sheet-type 10 5 1 1 32 32))
(dict-set! x4-characters 'normal-10-6
(load-image character-sheet character-sheet-type 10 6 1 1 32 32))
(dict-set! x4-characters 'normal-10-7
(load-image character-sheet character-sheet-type 10 7 1 1 32 32))
(dict-set! x4-characters 'normal-10-8
(load-image character-sheet character-sheet-type 10 8 1 1 32 32))
(dict-set! x4-characters 'normal-10-9
(load-image character-sheet character-sheet-type 10 9 1 1 32 32))
(dict-set! x4-characters 'normal-10-10
(load-image character-sheet character-sheet-type 10 10 1 1 32 32))
(dict-set! x4-characters 'normal-10-11
(load-image character-sheet character-sheet-type 10 11 1 1 32 32))
(dict-set! x4-characters 'normal-10-12
(load-image character-sheet character-sheet-type 10 12 1 1 32 32))
(dict-set! x4-characters 'normal-10-13
(load-image character-sheet character-sheet-type 10 13 1 1 32 32))
(dict-set! x4-characters 'normal-10-14
(load-image character-sheet character-sheet-type 10 14 1 1 32 32))
(dict-set! x4-characters 'normal-11-0
(load-image character-sheet character-sheet-type 11 0 1 1 32 32))
(dict-set! x4-characters 'normal-11-1
(load-image character-sheet character-sheet-type 11 1 1 1 32 32))
(dict-set! x4-characters 'normal-11-2
(load-image character-sheet character-sheet-type 11 2 1 1 32 32))
(dict-set! x4-characters 'normal-11-3
(load-image character-sheet character-sheet-type 11 3 1 1 32 32))
(dict-set! x4-characters 'normal-11-4
(load-image character-sheet character-sheet-type 11 4 1 1 32 32))
(dict-set! x4-characters 'normal-11-5
(load-image character-sheet character-sheet-type 11 5 1 1 32 32))
(dict-set! x4-characters 'normal-11-6
(load-image character-sheet character-sheet-type 11 6 1 1 32 32))
(dict-set! x4-characters 'normal-11-7
(load-image character-sheet character-sheet-type 11 7 1 1 32 32))
(dict-set! x4-characters 'normal-11-8
(load-image character-sheet character-sheet-type 11 8 1 1 32 32))
(dict-set! x4-characters 'normal-11-9
(load-image character-sheet character-sheet-type 11 9 1 1 32 32))
(dict-set! x4-characters 'normal-11-10
(load-image character-sheet character-sheet-type 11 10 1 1 32 32))
(dict-set! x4-characters 'normal-11-11
(load-image character-sheet character-sheet-type 11 11 1 1 32 32))
(dict-set! x4-characters 'normal-11-12
(load-image character-sheet character-sheet-type 11 12 1 1 32 32))
(dict-set! x4-characters 'normal-11-13
(load-image character-sheet character-sheet-type 11 13 1 1 32 32))
(dict-set! x4-characters 'normal-11-14
(load-image character-sheet character-sheet-type 11 14 1 1 32 32))
(dict-set! x4-characters 'normal-12-0
(load-image character-sheet character-sheet-type 12 0 1 1 32 32))
(dict-set! x4-characters 'normal-12-1
(load-image character-sheet character-sheet-type 12 1 1 1 32 32))
(dict-set! x4-characters 'normal-12-2
(load-image character-sheet character-sheet-type 12 2 1 1 32 32))
(dict-set! x4-characters 'normal-12-3
(load-image character-sheet character-sheet-type 12 3 1 1 32 32))
(dict-set! x4-characters 'normal-12-4
(load-image character-sheet character-sheet-type 12 4 1 1 32 32))
(dict-set! x4-characters 'normal-12-5
(load-image character-sheet character-sheet-type 12 5 1 1 32 32))
(dict-set! x4-characters 'normal-12-6
(load-image character-sheet character-sheet-type 12 6 1 1 32 32))
(dict-set! x4-characters 'normal-12-7
(load-image character-sheet character-sheet-type 12 7 1 1 32 32))
(dict-set! x4-characters 'normal-12-8
(load-image character-sheet character-sheet-type 12 8 1 1 32 32))
(dict-set! x4-characters 'normal-12-9
(load-image character-sheet character-sheet-type 12 9 1 1 32 32))
(dict-set! x4-characters 'normal-12-10
(load-image character-sheet character-sheet-type 12 10 1 1 32 32))
(dict-set! x4-characters 'normal-12-11
(load-image character-sheet character-sheet-type 12 11 1 1 32 32))
(dict-set! x4-characters 'normal-12-12
(load-image character-sheet character-sheet-type 12 12 1 1 32 32))
(dict-set! x4-characters 'normal-12-13
(load-image character-sheet character-sheet-type 12 13 1 1 32 32))
(dict-set! x4-characters 'normal-12-14
(load-image character-sheet character-sheet-type 12 14 1 1 32 32))
(dict-set! x4-characters 'normal-13-0
(load-image character-sheet character-sheet-type 13 0 1 1 32 32))
(dict-set! x4-characters 'normal-13-1
(load-image character-sheet character-sheet-type 13 1 1 1 32 32))
(dict-set! x4-characters 'normal-13-2
(load-image character-sheet character-sheet-type 13 2 1 1 32 32))
(dict-set! x4-characters 'normal-13-3
(load-image character-sheet character-sheet-type 13 3 1 1 32 32))
(dict-set! x4-characters 'normal-13-4
(load-image character-sheet character-sheet-type 13 4 1 1 32 32))
(dict-set! x4-characters 'normal-13-5
(load-image character-sheet character-sheet-type 13 5 1 1 32 32))
(dict-set! x4-characters 'normal-13-6
(load-image character-sheet character-sheet-type 13 6 1 1 32 32))
(dict-set! x4-characters 'normal-13-7
(load-image character-sheet character-sheet-type 13 7 1 1 32 32))
(dict-set! x4-characters 'normal-13-8
(load-image character-sheet character-sheet-type 13 8 1 1 32 32))
(dict-set! x4-characters 'normal-13-9
(load-image character-sheet character-sheet-type 13 9 1 1 32 32))
(dict-set! x4-characters 'normal-13-10
(load-image character-sheet character-sheet-type 13 10 1 1 32 32))
(dict-set! x4-characters 'normal-13-11
(load-image character-sheet character-sheet-type 13 11 1 1 32 32))
(dict-set! x4-characters 'normal-13-12
(load-image character-sheet character-sheet-type 13 12 1 1 32 32))
(dict-set! x4-characters 'normal-13-13
(load-image character-sheet character-sheet-type 13 13 1 1 32 32))
(dict-set! x4-characters 'normal-13-14
(load-image character-sheet character-sheet-type 13 14 1 1 32 32))
(dict-set! x4-characters 'normal-14-0
(load-image character-sheet character-sheet-type 14 0 1 1 32 32))
(dict-set! x4-characters 'normal-14-1
(load-image character-sheet character-sheet-type 14 1 1 1 32 32))
(dict-set! x4-characters 'normal-14-2
(load-image character-sheet character-sheet-type 14 2 1 1 32 32))
(dict-set! x4-characters 'normal-14-3
(load-image character-sheet character-sheet-type 14 3 1 1 32 32))
(dict-set! x4-characters 'normal-14-4
(load-image character-sheet character-sheet-type 14 4 1 1 32 32))
(dict-set! x4-characters 'normal-14-5
(load-image character-sheet character-sheet-type 14 5 1 1 32 32))
(dict-set! x4-characters 'normal-14-6
(load-image character-sheet character-sheet-type 14 6 1 1 32 32))
(dict-set! x4-characters 'normal-14-7
(load-image character-sheet character-sheet-type 14 7 1 1 32 32))
(dict-set! x4-characters 'normal-14-8
(load-image character-sheet character-sheet-type 14 8 1 1 32 32))
(dict-set! x4-characters 'normal-14-9
(load-image character-sheet character-sheet-type 14 9 1 1 32 32))
(dict-set! x4-characters 'normal-14-10
(load-image character-sheet character-sheet-type 14 10 1 1 32 32))
(dict-set! x4-characters 'normal-14-11
(load-image character-sheet character-sheet-type 14 11 1 1 32 32))
(dict-set! x4-characters 'normal-14-12
(load-image character-sheet character-sheet-type 14 12 1 1 32 32))
(dict-set! x4-characters 'normal-14-13
(load-image character-sheet character-sheet-type 14 13 1 1 32 32))
(dict-set! x4-characters 'normal-14-14
(load-image character-sheet character-sheet-type 14 14 1 1 32 32))
(dict-set! x4-characters 'normal-15-0
(load-image character-sheet character-sheet-type 15 0 1 1 32 32))
(dict-set! x4-characters 'normal-15-1
(load-image character-sheet character-sheet-type 15 1 1 1 32 32))
(dict-set! x4-characters 'normal-15-2
(load-image character-sheet character-sheet-type 15 2 1 1 32 32))
(dict-set! x4-characters 'normal-15-3
(load-image character-sheet character-sheet-type 15 3 1 1 32 32))
(dict-set! x4-characters 'normal-15-4
(load-image character-sheet character-sheet-type 15 4 1 1 32 32))
(dict-set! x4-characters 'normal-15-5
(load-image character-sheet character-sheet-type 15 5 1 1 32 32))
(dict-set! x4-characters 'normal-15-6
(load-image character-sheet character-sheet-type 15 6 1 1 32 32))
(dict-set! x4-characters 'normal-15-7
(load-image character-sheet character-sheet-type 15 7 1 1 32 32))
(dict-set! x4-characters 'normal-15-8
(load-image character-sheet character-sheet-type 15 8 1 1 32 32))
(dict-set! x4-characters 'normal-15-9
(load-image character-sheet character-sheet-type 15 9 1 1 32 32))
(dict-set! x4-characters 'normal-15-10
(load-image character-sheet character-sheet-type 15 10 1 1 32 32))
(dict-set! x4-characters 'normal-15-11
(load-image character-sheet character-sheet-type 15 11 1 1 32 32))
(dict-set! x4-characters 'normal-15-12
(load-image character-sheet character-sheet-type 15 12 1 1 32 32))
(dict-set! x4-characters 'normal-15-13
(load-image character-sheet character-sheet-type 15 13 1 1 32 32))
(dict-set! x4-characters 'normal-15-14
(load-image character-sheet character-sheet-type 15 14 1 1 32 32))
|
|
c0b3b77e90a795c4bdedc94e9c9c23ddd99f5b47b45506b1370b7fdfa449c2b6 | acl2/acl2 | remove-guard-holders.lisp | Copyright ( C ) 2021 , ForrestHunt , Inc.
; Written by
; License: A 3-clause BSD license.  See the LICENSE file distributed with ACL2.

; This book was renamed from file remove-guard-holders-strong-3.lsp in late
; January, 2023, and also significantly expanded from that file.
; This book contains progress towards converting ACL2 source function
; remove-guard-holders from :program mode to guard-verified :logic mode. (Note
; that :logic mode ACL2 source functions must be guard-verified.) See the book
; remove-guard-holders-future.lisp for additional work, extending the present
; book, towards that task, especially if you are interested in making
; additional such progress.
; The theorems we export are only those that seem safe, in that including this
; book seems unlikely to mess with proofs. That basically limits the exported
; theorems to :forward-chaining rules and rewrite rules hung on a function
; symbol explicitly addressed by this book; for example,
; weak-splo-extracts-tuple-listp-append is non-local since it is hung on
; weak-splo-extracts-tuple-listp.
; Perhaps it would make sense to eliminate weak-badge-userfn-structure-alistp
; in favor of the new badge-userfn-structure-alistp -- but existing books would
; then need to be modified, notably
; books/system/remove-guard-holders1.lisp and
; books/system/remove-guard-holders-weak.lisp.
(in-package "ACL2")
(include-book "tools/flag" :dir :system)
(include-book "remove-guard-holders1")
(include-book "remove-guard-holders-weak")
(include-book "termp")
(include-book "subst-var")
(include-book "subcor-var")
; Convert the following :program-mode ACL2 system functions to guard-verified
; :logic mode ("and guards" marks that guards are verified as well).
(verify-termination flatten-ands-in-lit-lst) ; and guards
(verify-termination translate-declaration-to-guard/integer-gen) ; and guards
(verify-termination subst-each-for-var) ; and guards
(verify-termination possibly-dirty-lambda-objectp1) ; and guards
(verify-termination translate-declaration-to-guard1-gen) ; and guards
(verify-termination translate-declaration-to-guard-gen) ; and guards
; NOTE(review): a second, identical (verify-termination subst-each-for-var)
; appeared here; it was redundant (ACL2 skips redundant events) and has been
; removed.

; Keep remove-guard-holders-weak disabled locally so later proofs reason via
; lemmas about it rather than by expanding its definition.
(local (in-theory (disable remove-guard-holders-weak)))
; This is needed for (verify-termination executable-badge ...).
; Forward-chaining rule: the strong recognizer badge-userfn-structure-alistp
; implies the weak one, so hypotheses stated with the strong recognizer
; automatically make weak-recognizer facts available to the prover.
(defthm
  badge-userfn-structure-alistp-implies-weak-badge-userfn-structure-alistp
  (implies (badge-userfn-structure-alistp x)
           (weak-badge-userfn-structure-alistp x))
  :rule-classes :forward-chaining)
#+acl2-devel ; avoid error for redundant def. with raw Lisp code
(verify-termination ilks-plist-worldp) ; and guards

; An ilks-plist-worldp is in particular a plist-worldp; make that available
; by forward chaining.
(defthm ilks-plist-worldp-forward-to-plist-worldp
  (implies (ilks-plist-worldp w)
           (plist-worldp w))
  :rule-classes :forward-chaining)

; From ilks-plist-worldp, derive alistp facts about the badge-table and its
; :badge-userfn-structure entry; these arise in guard proofs for functions
; that look up badges in the world.
(defthm ilks-plist-worldp-forward-to-alistp-for-badge-userfn-structure
  (implies
   (ilks-plist-worldp wrld)
   (and (alistp (fgetprop 'badge-table 'table-alist nil wrld))
        (alistp (cdr (assoc-equal :badge-userfn-structure
                                  (fgetprop 'badge-table 'table-alist nil
                                            wrld))))))
  :rule-classes :forward-chaining)
; Local lemmas supporting the termination/guard proof for executable-badge.

; A non-nil cdr of an entry in a weak-badge-userfn-structure-alistp is a cons.
(local
 (defthm weak-badge-userfn-structure-alistp-implies-consp-cdr-assoc-equal
   (implies (and (weak-badge-userfn-structure-alistp alist)
                 (cdr (assoc-equal fn alist)))
            (consp (cdr (assoc-equal fn alist))))))

(defthm weak-badge-userfn-structure-alistp-forward-to-alistp
  (implies (weak-badge-userfn-structure-alistp alist)
           (alistp alist))
  :rule-classes :forward-chaining)

; Forced variant of the usual consp-assoc-equal fact, so the alistp
; hypothesis is relieved (or pushed as a forced goal) during guard proofs.
(local
 (defthm consp-assoc-equal-forced
   (implies (and (force (alistp l))
                 (assoc-equal name l))
            (consp (assoc-equal name l)))))

(local
 (defthm weak-badge-userfn-structure-alistp-implies-consp-cddr-assoc-equal
   (implies (and (weak-badge-userfn-structure-alistp alist)
                 (cddr (assoc-equal fn alist)))
            (consp (cddr (assoc-equal fn alist))))))

(verify-termination executable-badge ; and guards
  (declare (xargs :guard-hints (("Goal" :do-not-induct t)))))
; Local lemmas supporting guard verification of executable-tamep.  In the
; lemmas below, bpf is intended to be the value of *badge-prim-falist* (per
; the original inline comments); the lemmas establish consp/natp structure
; facts about looked-up entries.

(local
 (defthm executable-tamep-1
; bpf is *badge-prim-falist* value
   (implies (and (apply$-badge-alistp-ilks-t bpf)
                 (cdr (hons-assoc-equal fn bpf)))
            (consp (cdr (hons-assoc-equal fn bpf))))))

(local
 (defthm executable-tamep-2
; bpf is *badge-prim-falist* value
   (implies (and (apply$-badge-alistp-ilks-t bpf)
                 (cddr (hons-assoc-equal fn bpf)))
            (consp (cddr (hons-assoc-equal fn bpf))))))

(local
 (defthm executable-tamep-3
; bpf is *badge-prim-falist* value
   (implies (and (apply$-badge-alistp-ilks-t bpf)
                 (cdddr (hons-assoc-equal fn bpf)))
            (consp (cdddr (hons-assoc-equal fn bpf))))))

; The next three lemmas give the analogous structure facts for entries of a
; weak-badge-userfn-structure-alistp.
(local
 (defthm executable-tamep-4
   (implies (and (weak-badge-userfn-structure-alistp alist)
                 (caddr (assoc-equal fn alist)))
            (consp (caddr (assoc-equal fn alist))))))

(local
 (defthm executable-tamep-5
   (implies (and (weak-badge-userfn-structure-alistp alist)
                 (cdr (caddr (assoc-equal fn alist))))
            (consp (cdr (caddr (assoc-equal fn alist)))))))

(local
 (defthm executable-tamep-6
   (implies (and (weak-badge-userfn-structure-alistp alist)
                 (cddr (caddr (assoc-equal fn alist))))
            (consp (cddr (caddr (assoc-equal fn alist)))))))

(local
 (defthm executable-tamep-7-8
; bpf is *badge-prim-falist* value
   (implies (and (apply$-badge-alistp-ilks-t bpf)
                 (cdr (hons-assoc-equal fn bpf)))
            (natp (caddr (hons-assoc-equal fn bpf))))
   :rule-classes :type-prescription))

(local
 (defthm executable-tamep-8
; bpf is *badge-prim-falist* value
   (implies (and (apply$-badge-alistp-ilks-t bpf)
                 (hons-assoc-equal fn bpf))
            (equal (cddddr (hons-assoc-equal fn bpf))
                   t))))

(local
 (defthm executable-tamep-9
   (implies (and (badge-userfn-structure-alistp alist)
                 (caddr (assoc-equal fn alist)))
            (natp (cadr (caddr (assoc-equal fn alist)))))
   :rule-classes :type-prescription))

(local
 (defthm executable-tamep-10
   (implies (and (badge-userfn-structure-alistp alist)
                 (not (equal (cdddr (caddr (assoc-equal fn alist)))
                             t)))
            (true-listp (cdddr (caddr (assoc-equal fn alist)))))))

(verify-termination executable-tamep ; and guards
  (declare (xargs :guard-hints (("Goal" :do-not-induct t)))))

(verify-termination weak-splo-extracts-tuple-listp) ; and guards

(verify-termination well-formed-lambda-objectp1
  (declare (xargs :guard-hints (("Goal" :do-not-induct t)))))
; Now working towards verify-termination for
; syntactically-plausible-lambda-objectp.

; NOTE(review): the next two events also appear earlier in this book; as
; redundant events they are harmless.
(verify-termination translate-declaration-to-guard1-gen)

(verify-termination translate-declaration-to-guard-gen)

(local
 (defthm symbol-listp-implies-pseudo-term-listp
   (implies (symbol-listp x)
            (pseudo-term-listp x))))

; Introduce a flag function for the translate-declaration-to-guard-gen
; mutual-recursion nest so the pseudo-termp results can be proved by
; simultaneous induction.
(local
 (make-flag flag-translate-declaration-to-guard-gen
            translate-declaration-to-guard-gen))

(local
 (defthm-flag-translate-declaration-to-guard-gen
   (defthm pseudo-termp-translate-declaration-to-guard-gen
     (implies (and (pseudo-termp var)
                   (equal tflg t))
              (pseudo-termp
               (translate-declaration-to-guard-gen x var tflg wrld)))
     :flag translate-declaration-to-guard-gen)
   (defthm pseudo-term-listp-translate-declaration-to-guard-gen
     (implies (and (pseudo-termp var)
                   (equal tflg t))
              (pseudo-term-listp
               (translate-declaration-to-guard-gen-lst l var tflg wrld)))
     :flag translate-declaration-to-guard-gen-lst)))

(verify-termination type-expressions-from-type-spec)

(verify-termination syntactically-plausible-lambda-objectp1)

; Linear lemma bounding the acl2-count of the mv-nth 5 result of
; syntactically-plausible-lambda-objectp1; presumably this justifies the
; measure of syntactically-plausible-lambda-objectp, which recurs on that
; component.
(local
 (defthm syntactically-plausible-lambda-objectp-termination-lemma-1
   (< (acl2-count
       (mv-nth 5
               (syntactically-plausible-lambda-objectp1
                edcls formals ignores ignorables type-exprs satisfies-exprs
                guard)))
      (+ 5
         (acl2-count edcls)
         (acl2-count formals)
         (acl2-count guard)))
   :rule-classes :linear))
; Copied exactly (11/18/2015 and later) from ACL2 source file axioms.lisp,
; towards guard verification for make-lambda-application:
; Prove symbol-listp-all-vars1 so that the guards on all-vars1 can be
; verified; all-vars1 is in a mutual-recursion clique, hence the flagged
; development below.
(local
 (encapsulate
   ()

; We wish to prove symbol-listp-all-vars1, below, so that we can verify the
; guards on all-vars1.  But it is in a mutually recursive clique.  Our
; strategy is simple: (1) define the flagged version of the clique, (2) prove
; that it is equal to the given pair of official functions, (3) prove that it
; has the desired property and (4) then obtain the desired property of the
; official function by instantiation of the theorem proved in step 3, using
; the theorem proved in step 2 to rewrite the flagged calls in that instance
; to the official ones.

; Note: It would probably be better to make all-vars1/all-vars1-lst local,
; since it's really not of any interest outside the guard verification of
; all-vars1.  However, since we are passing through this file more than once,
; that does not seem to be an option.

   (local
    (defun all-vars1/all-vars1-lst (flg lst ans)
      (if (eq flg 'all-vars1)
          (cond ((variablep lst) (add-to-set-eq lst ans))
                ((fquotep lst) ans)
                (t (all-vars1/all-vars1-lst 'all-vars-lst1 (cdr lst) ans)))
        (cond ((endp lst) ans)
              (t (all-vars1/all-vars1-lst
                  'all-vars-lst1 (cdr lst)
                  (all-vars1/all-vars1-lst 'all-vars1 (car lst) ans)))))))

   ; Step 2: the flagged function equals the official pair.
   (local
    (defthm step-1-lemma
      (equal (all-vars1/all-vars1-lst flg lst ans)
             (if (equal flg 'all-vars1)
                 (all-vars1 lst ans)
               (all-vars1-lst lst ans)))))

   ; Step 3: the flagged function preserves symbol-listp.
   (local
    (defthm step-2-lemma
      (implies (and (symbol-listp ans)
                    (if (equal flg 'all-vars1)
                        (pseudo-termp lst)
                      (pseudo-term-listp lst)))
               (symbol-listp (all-vars1/all-vars1-lst flg lst ans)))))

   ; Step 4: instantiate to obtain the property of the official function.
   (defthm symbol-listp-all-vars1
     (implies (and (symbol-listp ans)
                   (pseudo-termp lst))
              (symbol-listp (all-vars1 lst ans)))
     :hints (("Goal" :use (:instance step-2-lemma (flg 'all-vars1)))))))
; Assorted local lemmas about symbol-listp / pseudo-term-listp used by the
; guard proofs that follow.

(local
 (defthm arglistp1-implies-symbol-listp
   (implies (arglistp1 x)
            (symbol-listp x))
   :hints (("Goal" :in-theory (enable arglistp1)))))

(local
 (defthm pseudo-term-listp-revappend
   (implies (true-listp x)
            (equal (pseudo-term-listp (revappend x y))
                   (and (pseudo-term-listp x)
                        (pseudo-term-listp y))))))

; Limit induction depth in the remainder of this book.
(set-induction-depth-limit 1)

(local
 (defthm member-symbol-listp-forward-to-symbolp
   (implies (and (member-equal a x)
                 (symbol-listp x))
            (symbolp a))
   :rule-classes :forward-chaining))

; The following gives us pseudo-termp-subst-var.
(include-book "system/subst-var" :dir :system)

(local
 (defthm pseudo-term-listp-subst-each-for-var
   (implies (and (pseudo-term-listp new-lst)
                 (variablep old)
                 (pseudo-termp term))
            (pseudo-term-listp (subst-each-for-var new-lst old term)))))

(local
 (defthm subset-symbol-listp-forward-to-symbol-listp
   (implies (and (subsetp-equal x y)
                 (true-listp x)
                 (symbol-listp y))
            (symbol-listp x))
   :rule-classes :forward-chaining))
; Lemmas characterizing the multiple values returned by
; syntactically-plausible-lambda-objectp1 when its first result is non-nil.

; mv-nth 3 (the accumulated type expressions) is a pseudo-term-listp.
(local
 (defthm pseudo-term-listp-mv-nth-3-syntactically-plausible-lambda-objectp1
   (implies
    (and (car (syntactically-plausible-lambda-objectp1
               edcls formals ignores ignorables type-exprs satisfies-exprs
               guard))
         (symbol-listp formals)
         (pseudo-term-listp type-exprs))
    (pseudo-term-listp
     (mv-nth 3
             (syntactically-plausible-lambda-objectp1
              edcls formals ignores ignorables type-exprs satisfies-exprs
              guard))))))

(local
 (defthm symbol-listp-set-difference-equal
   (implies (symbol-listp x)
            (symbol-listp (set-difference-equal x y)))))

(local
 (encapsulate
   ()
   ; Contrapositive helper, local to this encapsulate.
   (local (defthm symbol-listp-revappend-lemma
            (implies (not (symbol-listp y))
                     (not (symbol-listp (revappend x y))))))
   (defthm symbol-listp-revappend
     (implies (true-listp x)
              (equal (symbol-listp (revappend x y))
                     (and (symbol-listp x)
                          (symbol-listp y)))))))

; mv-nth 1 (the ignores) is a true-listp.
(local
 (defthm true-listp-mv-nth-1-syntactically-plausible-lambda-objectp1
   (implies
    (and (car (syntactically-plausible-lambda-objectp1
               edcls formals ignores ignorables type-exprs satisfies-exprs
               guard))
         (symbol-listp ignores)
         (symbol-listp formals))
    (true-listp
     (mv-nth 1
             (syntactically-plausible-lambda-objectp1
              edcls formals ignores ignorables type-exprs satisfies-exprs
              guard))))))

; mv-nth 2 (the ignorables) is a true-listp.
(local
 (defthm true-listp-mv-nth-2-syntactically-plausible-lambda-objectp1
   (implies
    (and (car (syntactically-plausible-lambda-objectp1
               edcls formals ignores ignorables type-exprs satisfies-exprs
               guard))
         (symbol-listp ignorables)
         (symbol-listp formals))
    (true-listp
     (mv-nth 2
             (syntactically-plausible-lambda-objectp1
              edcls formals ignores ignorables type-exprs satisfies-exprs
              guard))))))

; Non-local: an arglistp is a symbol-listp, available by forward chaining.
(defthm arglistp-forward-to-symbol-listp
  (implies (arglistp x)
           (symbol-listp x))
  :rule-classes :forward-chaining)
(verify-termination syntactically-plausible-lambda-objectp
  (declare (xargs :guard-hints (("Goal" :do-not-induct t)))))

; Start towards verify-termination for clean-up-dirty-lambda-objects.

(verify-termination expand-all-lambdas)

; Flag function for the expand-all-lambdas mutual-recursion nest.
(local
 (make-flag flag-expand-all-lambdas
            expand-all-lambdas
            :flag-mapping ((expand-all-lambdas term)
                           (expand-all-lambdas-lst terms))))

; expand-all-lambdas-lst preserves length.
(local
 (defthm len-expand-all-lambdas-lst
   (equal (len (expand-all-lambdas-lst terms))
          (len terms))))

; A lambda application's argument list is a pseudo-term-listp.
(local
 (defthm pseudo-termp-forward-to-pseudo-term-listp-cdr
   (implies (and (pseudo-termp x)
                 (consp x)
                 (consp (car x)))
            (pseudo-term-listp (cdr x)))
   :rule-classes :forward-chaining))

; expand-all-lambdas maps pseudo-terms to pseudo-terms (proved by
; simultaneous induction over the nest).
(local
 (defthm-flag-expand-all-lambdas
   (defthm type-of-pseudo-termp
     (implies (pseudo-termp term)
              (pseudo-termp (expand-all-lambdas term)))
     :flag term)
   (defthm pseudo-term-listp-expand-all-lambdas-lst
     (implies (pseudo-term-listp terms)
              (pseudo-term-listp (expand-all-lambdas-lst terms)))
     :flag terms)))

(verify-guards expand-all-lambdas)

; Start verify-termination for warrants-for-tamep

(verify-termination find-warrant-function-name)

; Admit first without guards; guards are verified below after the supporting
; lemmas are in place.
(verify-termination warrants-for-tamep
  (declare (xargs :verify-guards nil)))
; Flag function for the warrants-for-tamep mutual-recursion nest, used for
; the two simultaneous inductions below.
(local
 (make-flag flag-warrants-for-tamep
            warrants-for-tamep
            :flag-mapping ((warrants-for-tamep term)
                           (warrants-for-tamep-functionp fn)
                           (warrants-for-suitably-tamep-listp lst))))

; The car (the accumulated warrants) of each nest member is a true-listp.
(local
 (defthm-flag-warrants-for-tamep
   (defthm true-listp-car-warrants-for-tamep
     (implies (and (ilks-plist-worldp wrld)
                   (executable-tamep x wrld)
                   (true-listp warrants))
              (true-listp (car (warrants-for-tamep
                                x wrld warrants unwarranteds))))
     :flag term)
   (defthm true-listp-car-warrants-for-tamep-functionp
     (implies (and (ilks-plist-worldp wrld)
                   (executable-tamep-functionp fn wrld)
                   (true-listp warrants))
              (true-listp (car (warrants-for-tamep-functionp
                                fn wrld warrants unwarranteds))))
     :flag fn)
   (defthm true-listp-car-warrants-for-warrants-for-suitably-tamep-listp
     (implies (and (ilks-plist-worldp wrld)
                   (executable-suitably-tamep-listp flags args wrld)
                   (true-listp warrants))
              (true-listp (car (warrants-for-suitably-tamep-listp
                                flags args wrld warrants unwarranteds))))
     :flag lst)))

; mv-nth 1 (the accumulated unwarranteds) of each nest member is a
; symbol-listp.
(local
 (defthm-flag-warrants-for-tamep
   (defthm symbol-listp-mv-nth-1-warrants-for-tamep
     (implies (and (ilks-plist-worldp wrld)
                   (executable-tamep x wrld)
                   (symbol-listp unwarranteds))
              (symbol-listp (mv-nth 1 (warrants-for-tamep
                                       x wrld warrants unwarranteds))))
     :flag term)
   (defthm symbol-listp-mv-nth-1-warrants-for-tamep-functionp
     (implies (and (ilks-plist-worldp wrld)
                   (executable-tamep-functionp fn wrld)
                   (symbol-listp unwarranteds))
              (symbol-listp (mv-nth 1 (warrants-for-tamep-functionp
                                       fn wrld warrants unwarranteds))))
     :flag fn)
   (defthm symbol-listp-mv-nth-1-warrants-for-warrants-for-suitably-tamep-listp
     (implies (and (ilks-plist-worldp wrld)
                   (executable-suitably-tamep-listp flags args wrld)
                   (symbol-listp unwarranteds))
              (symbol-listp (mv-nth 1 (warrants-for-suitably-tamep-listp
                                       flags args wrld warrants
                                       unwarranteds))))
     :flag lst)))

(verify-guards warrants-for-tamep)
(verify-termination weak-splo-extracts-tuple-listp) ; and guards
; The following doesn't seem to be needed, but it's probably harmless.
(defthm weak-splo-extracts-tuple-listp-forward-to-true-listp
(implies (weak-splo-extracts-tuple-listp x)
(true-listp x))
:rule-classes :forward-chaining)
(defthm weak-splo-extracts-tuple-listp-append
(implies (true-listp x)
(equal (weak-splo-extracts-tuple-listp (append x y))
(and (weak-splo-extracts-tuple-listp x)
(weak-splo-extracts-tuple-listp y)))))
(verify-termination type-expressions-from-type-spec)
(verify-termination syntactically-plausible-lambda-objectp1
(declare (xargs :verify-guards nil)))
(verify-termination syntactically-plausible-lambda-objectp
(declare (xargs :verify-guards nil)))
(local
(make-flag flag-syntactically-plausible-lambda-objectp
syntactically-plausible-lambda-objectp
:flag-mapping
((syntactically-plausible-lambda-objectp main)
(syntactically-plausible-lambda-objectsp-within within)
(syntactically-plausible-lambda-objectsp-within-lst listp))))
(local
(defthm-flag-syntactically-plausible-lambda-objectp
(defthm weak-splo-extracts-tuple-listp-1
(implies (syntactically-plausible-lambda-objectp gflg x)
(weak-splo-extracts-tuple-listp
(syntactically-plausible-lambda-objectp gflg x)))
:flag main)
(defthm weak-splo-extracts-tuple-listp-2
(let ((ans (syntactically-plausible-lambda-objectsp-within gflg body)))
(implies ans
(or (weak-splo-extracts-tuple-listp ans)
(equal ans t))))
:rule-classes nil
:flag within)
(defthm weak-splo-extracts-tuple-listp-3
(let ((ans (syntactically-plausible-lambda-objectsp-within-lst gflg args)))
(implies ans
(or (weak-splo-extracts-tuple-listp ans)
(equal ans t))))
:rule-classes nil
:flag listp)))
(defthm weak-splo-extracts-tuple-listp-of-syntactically-plausible-lambda-objectp
(implies
(syntactically-plausible-lambda-objectp nil x)
(weak-splo-extracts-tuple-listp
(syntactically-plausible-lambda-objectp nil x))))
(verify-termination well-formed-lambda-objectp)
(verify-termination possibly-dirty-lambda-objectp)
(verify-guards possibly-dirty-lambda-objectp)
; The following events go through, but it will take some work to remove the
; skip-proofs; see system/remove-guard-holders-future.lisp. Please do not
; convert to logic mode here without also verifying guards, since that could
; slow down the system.
; (skip-proofs (verify-termination clean-up-dirty-lambda-objects))
; (skip-proofs (verify-termination may-contain-dirty-lambda-objectsp))
; (verify-termination possibly-clean-up-dirty-lambda-objects)
; (skip-proofs (verify-guards possibly-clean-up-dirty-lambda-objects))
; (verify-termination remove-guard-holders)
| null | https://raw.githubusercontent.com/acl2/acl2/eb1c548cabe41b555e80af3a52296c306694d168/books/system/remove-guard-holders.lisp | lisp | This book was renamed from file remove-guard-holders-strong-3.lsp in late
This book contains progress towards converting ACL2 source function
remove-guard-holders from :program mode to guard-verified :logic mode. (Note
that :logic mode ACL2 source functions must be guard-verified.) See the book
remove-guard-holders-future.lisp for additional work, extending the present
book, towards that task, especially if you are interested in making
additional such progress.
The theorems we export are only those that seem safe, in that including this
book seems unlikely to mess with proofs. That basically limits the exported
theorems to :forward-chaining rules and rewrite rules hung on a function
symbol explicitly addressed by this book; for example,
weak-splo-extracts-tuple-listp-append is non-local since it is hung on
weak-splo-extracts-tuple-listp.
Perhaps it would make sense to eliminate weak-badge-userfn-structure-alistp
in favor of the new badge-userfn-structure-alistp -- but existing books would
then need to be modified, notably
books/system/remove-guard-holders1.lisp and
books/system/remove-guard-holders-weak.lisp.
and guards
and guards
and guards
and guards
and guards
and guards
and guards
This is needed for (verify-termination executable-badge ...).
avoid error for redundant def. with raw Lisp code
and guards
and guards
bpf is *badge-prim-falist* value
bpf is *badge-prim-falist* value
bpf is *badge-prim-falist* value
bpf is *badge-prim-falist* value
bpf is *badge-prim-falist* value
and guards
and guards
Now working towards verify-termination for
syntactically-plausible-lambda-objectp.
towards guard verification for make-lambda-application:
We wish to prove symbol-listp-all-vars1, below, so that we can verify the
guards on all-vars1. But it is in a mutually recursive clique. Our strategy
official ones.
Note: It would probably be better to make all-vars1/all-vars1-lst local,
since it's really not of any interest outside the guard verification of
all-vars1. However, since we are passing through this file more than once,
that does not seem to be an option.
The following gives us pseudo-termp-subst-var.
Start towards verify-termination for clean-up-dirty-lambda-objects.
Start verify-termination for warrants-for-tamep
and guards
The following doesn't seem to be needed, but it's probably harmless.
The following events go through, but it will take some work to remove the
skip-proofs; see system/remove-guard-holders-future.lisp. Please do not
convert to logic mode here without also verifying guards, since that could
slow down the system.
(skip-proofs (verify-termination clean-up-dirty-lambda-objects))
(skip-proofs (verify-termination may-contain-dirty-lambda-objectsp))
(verify-termination possibly-clean-up-dirty-lambda-objects)
(skip-proofs (verify-guards possibly-clean-up-dirty-lambda-objects))
(verify-termination remove-guard-holders) | Copyright ( C ) 2021 , ForrestHunt , Inc.
Written by
License : A 3 - clause BSD license . See the LICENSE file distributed with ACL2 .
January , 2023 and also significantly expanded from that file .
(in-package "ACL2")
(include-book "tools/flag" :dir :system)
(include-book "remove-guard-holders1")
(include-book "remove-guard-holders-weak")
(include-book "termp")
(include-book "subst-var")
(include-book "subcor-var")
(local (in-theory (disable remove-guard-holders-weak)))
(defthm
badge-userfn-structure-alistp-implies-weak-badge-userfn-structure-alistp
(implies (badge-userfn-structure-alistp x)
(weak-badge-userfn-structure-alistp x))
:rule-classes :forward-chaining)
(defthm ilks-plist-worldp-forward-to-plist-worldp
(implies (ilks-plist-worldp w)
(plist-worldp w))
:rule-classes :forward-chaining)
(defthm ilks-plist-worldp-forward-to-alistp-for-badge-userfn-structure
(implies
(ilks-plist-worldp wrld)
(and (alistp (fgetprop 'badge-table 'table-alist nil wrld))
(alistp (cdr (assoc-equal :badge-userfn-structure
(fgetprop 'badge-table 'table-alist nil
wrld))))))
:rule-classes :forward-chaining)
(local
(defthm weak-badge-userfn-structure-alistp-implies-consp-cdr-assoc-equal
(implies (and (weak-badge-userfn-structure-alistp alist)
(cdr (assoc-equal fn alist)))
(consp (cdr (assoc-equal fn alist))))))
(defthm weak-badge-userfn-structure-alistp-forward-to-alistp
(implies (weak-badge-userfn-structure-alistp alist)
(alistp alist))
:rule-classes :forward-chaining)
(local
(defthm consp-assoc-equal-forced
(implies (and (force (alistp l))
(assoc-equal name l))
(consp (assoc-equal name l)))))
(local
(defthm weak-badge-userfn-structure-alistp-implies-consp-cddr-assoc-equal
(implies (and (weak-badge-userfn-structure-alistp alist)
(cddr (assoc-equal fn alist)))
(consp (cddr (assoc-equal fn alist))))))
(declare (xargs :guard-hints (("Goal" :do-not-induct t)))))
(local
(defthm executable-tamep-1
(implies (and (apply$-badge-alistp-ilks-t bpf)
(cdr (hons-assoc-equal fn bpf)))
(consp (cdr (hons-assoc-equal fn bpf))))))
(local
(defthm executable-tamep-2
(implies (and (apply$-badge-alistp-ilks-t bpf)
(cddr (hons-assoc-equal fn bpf)))
(consp (cddr (hons-assoc-equal fn bpf))))))
(local
(defthm executable-tamep-3
(implies (and (apply$-badge-alistp-ilks-t bpf)
(cdddr (hons-assoc-equal fn bpf)))
(consp (cdddr (hons-assoc-equal fn bpf))))))
(local
(defthm executable-tamep-4
(implies (and (weak-badge-userfn-structure-alistp alist)
(caddr (assoc-equal fn alist)))
(consp (caddr (assoc-equal fn alist))))))
(local
(defthm executable-tamep-5
(implies (and (weak-badge-userfn-structure-alistp alist)
(cdr (caddr (assoc-equal fn alist))))
(consp (cdr (caddr (assoc-equal fn alist)))))))
(local
(defthm executable-tamep-6
(implies (and (weak-badge-userfn-structure-alistp alist)
(cddr (caddr (assoc-equal fn alist))))
(consp (cddr (caddr (assoc-equal fn alist)))))))
(local
(defthm executable-tamep-7-8
(implies (and (apply$-badge-alistp-ilks-t bpf)
(cdr (hons-assoc-equal fn bpf)))
(natp (caddr (hons-assoc-equal fn bpf))))
:rule-classes :type-prescription))
(local
(defthm executable-tamep-8
(implies (and (apply$-badge-alistp-ilks-t bpf)
(hons-assoc-equal fn bpf))
(equal (cddddr (hons-assoc-equal fn bpf))
t))))
(local
(defthm executable-tamep-9
(implies (and (badge-userfn-structure-alistp alist)
(caddr (assoc-equal fn alist)))
(natp (cadr (caddr (assoc-equal fn alist)))))
:rule-classes :type-prescription))
(local
(defthm executable-tamep-10
(implies (and (badge-userfn-structure-alistp alist)
(not (equal (cdddr (caddr (assoc-equal fn alist)))
t)))
(true-listp (cdddr (caddr (assoc-equal fn alist)))))))
(declare (xargs :guard-hints (("Goal" :do-not-induct t)))))
(verify-termination well-formed-lambda-objectp1
(declare (xargs :guard-hints (("Goal" :do-not-induct t)))))
(verify-termination translate-declaration-to-guard1-gen)
(verify-termination translate-declaration-to-guard-gen)
(local
(defthm symbol-listp-implies-pseudo-term-listp
(implies (symbol-listp x)
(pseudo-term-listp x))))
(local
(make-flag flag-translate-declaration-to-guard-gen
translate-declaration-to-guard-gen))
(local
(defthm-flag-translate-declaration-to-guard-gen
(defthm pseudo-termp-translate-declaration-to-guard-gen
(implies (and (pseudo-termp var)
(equal tflg t))
(pseudo-termp
(translate-declaration-to-guard-gen x var tflg wrld)))
:flag translate-declaration-to-guard-gen)
(defthm pseudo-term-listp-translate-declaration-to-guard-gen
(implies (and (pseudo-termp var)
(equal tflg t))
(pseudo-term-listp
(translate-declaration-to-guard-gen-lst l var tflg wrld)))
:flag translate-declaration-to-guard-gen-lst)))
(verify-termination type-expressions-from-type-spec)
(verify-termination syntactically-plausible-lambda-objectp1)
(local
(defthm syntactically-plausible-lambda-objectp-termination-lemma-1
(< (acl2-count
(mv-nth 5
(syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard)))
(+ 5
(acl2-count edcls)
(acl2-count formals)
(acl2-count guard)))
:rule-classes :linear))
Copied exactly ( 11/18/2015 and ) from ACL2 source file axioms.lisp ,
(local
(encapsulate
()
is simple : ( 1 ) define the flagged version of the clique , ( 2 ) prove that it is
equal to the given pair of official functions , ( 3 ) prove that it has the
desired property and ( 4 ) then obtain the desired property of the official
function by instantiation of the theorem proved in step 3 , using the theorem
proved in step 2 to rewrite the flagged flagged calls in that instance to the
(local
(defun all-vars1/all-vars1-lst (flg lst ans)
(if (eq flg 'all-vars1)
(cond ((variablep lst) (add-to-set-eq lst ans))
((fquotep lst) ans)
(t (all-vars1/all-vars1-lst 'all-vars-lst1 (cdr lst) ans)))
(cond ((endp lst) ans)
(t (all-vars1/all-vars1-lst 'all-vars-lst1 (cdr lst)
(all-vars1/all-vars1-lst 'all-vars1 (car lst) ans)))))))
(local
(defthm step-1-lemma
(equal (all-vars1/all-vars1-lst flg lst ans)
(if (equal flg 'all-vars1) (all-vars1 lst ans) (all-vars1-lst lst ans)))))
(local
(defthm step-2-lemma
(implies (and (symbol-listp ans)
(if (equal flg 'all-vars1)
(pseudo-termp lst)
(pseudo-term-listp lst)))
(symbol-listp (all-vars1/all-vars1-lst flg lst ans)))))
(defthm symbol-listp-all-vars1
(implies (and (symbol-listp ans)
(pseudo-termp lst))
(symbol-listp (all-vars1 lst ans)))
:hints (("Goal" :use (:instance step-2-lemma (flg 'all-vars1)))))))
(local
(defthm arglistp1-implies-symbol-listp
(implies (arglistp1 x)
(symbol-listp x))
:hints (("Goal" :in-theory (enable arglistp1)))))
(local
(defthm pseudo-term-listp-revappend
(implies (true-listp x)
(equal (pseudo-term-listp (revappend x y))
(and (pseudo-term-listp x)
(pseudo-term-listp y))))))
(set-induction-depth-limit 1)
(local
(defthm member-symbol-listp-forward-to-symbolp
(implies (and (member-equal a x)
(symbol-listp x))
(symbolp a))
:rule-classes :forward-chaining))
(include-book "system/subst-var" :dir :system)
(local
(defthm pseudo-term-listp-subst-each-for-var
(implies (and (pseudo-term-listp new-lst)
(variablep old)
(pseudo-termp term))
(pseudo-term-listp (subst-each-for-var new-lst old term)))))
(local
(defthm subset-symbol-listp-forward-to-symbol-listp
(implies (and (subsetp-equal x y)
(true-listp x)
(symbol-listp y))
(symbol-listp x))
:rule-classes :forward-chaining))
(local
(defthm pseudo-term-listp-mv-nth-3-syntactically-plausible-lambda-objectp1
(implies
(and (car (syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard))
(symbol-listp formals)
(pseudo-term-listp type-exprs))
(pseudo-term-listp
(mv-nth 3
(syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard))))))
(local
(defthm symbol-listp-set-difference-equal
(implies (symbol-listp x)
(symbol-listp (set-difference-equal x y)))))
(local
(encapsulate
()
(local (defthm symbol-listp-revappend-lemma
(implies (not (symbol-listp y))
(not (symbol-listp (revappend x y))))))
(defthm symbol-listp-revappend
(implies (true-listp x)
(equal (symbol-listp (revappend x y))
(and (symbol-listp x)
(symbol-listp y)))))))
(local
(defthm true-listp-mv-nth-1-syntactically-plausible-lambda-objectp1
(implies
(and (car (syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard))
(symbol-listp ignores)
(symbol-listp formals))
(true-listp
(mv-nth 1
(syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard))))))
(local
(defthm true-listp-mv-nth-2-syntactically-plausible-lambda-objectp1
(implies
(and (car (syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard))
(symbol-listp ignorables)
(symbol-listp formals))
(true-listp
(mv-nth 2
(syntactically-plausible-lambda-objectp1
edcls formals ignores ignorables type-exprs satisfies-exprs
guard))))))
(defthm arglistp-forward-to-symbol-listp
(implies (arglistp x)
(symbol-listp x ))
:rule-classes :forward-chaining)
(verify-termination syntactically-plausible-lambda-objectp
(declare (xargs :guard-hints (("Goal" :do-not-induct t)))))
(verify-termination expand-all-lambdas)
(local
(make-flag flag-expand-all-lambdas
expand-all-lambdas
:flag-mapping ((expand-all-lambdas term)
(expand-all-lambdas-lst terms))))
(local
(defthm len-expand-all-lambdas-lst
(equal (len (expand-all-lambdas-lst terms))
(len terms))))
(local
(defthm pseudo-termp-forward-to-pseudo-term-listp-cdr
(implies (and (pseudo-termp x)
(consp x)
(consp (car x)))
(pseudo-term-listp (cdr x)))
:rule-classes :forward-chaining))
(local
(defthm-flag-expand-all-lambdas
(defthm type-of-pseudo-termp
(implies (pseudo-termp term)
(pseudo-termp (expand-all-lambdas term)))
:flag term)
(defthm pseudo-term-listp-expand-all-lambdas-lst
(implies (pseudo-term-listp terms)
(pseudo-term-listp (expand-all-lambdas-lst terms)))
:flag terms)))
(verify-guards expand-all-lambdas)
(verify-termination find-warrant-function-name)
(verify-termination warrants-for-tamep
(declare (xargs :verify-guards nil)))
(local
(make-flag flag-warrants-for-tamep
warrants-for-tamep
:flag-mapping ((warrants-for-tamep term)
(warrants-for-tamep-functionp fn)
(warrants-for-suitably-tamep-listp lst))))
(local
(defthm-flag-warrants-for-tamep
(defthm true-listp-car-warrants-for-tamep
(implies (and (ilks-plist-worldp wrld)
(executable-tamep x wrld)
(true-listp warrants))
(true-listp (car (warrants-for-tamep
x wrld warrants unwarranteds))))
:flag term)
(defthm true-listp-car-warrants-for-tamep-functionp
(implies (and (ilks-plist-worldp wrld)
(executable-tamep-functionp fn wrld)
(true-listp warrants))
(true-listp (car (warrants-for-tamep-functionp
fn wrld warrants unwarranteds))))
:flag fn)
(defthm true-listp-car-warrants-for-warrants-for-suitably-tamep-listp
(implies (and (ilks-plist-worldp wrld)
(executable-suitably-tamep-listp flags args wrld)
(true-listp warrants))
(true-listp (car (warrants-for-suitably-tamep-listp
flags args wrld warrants unwarranteds))))
:flag lst)))
(local
(defthm-flag-warrants-for-tamep
(defthm symbol-listp-mv-nth-1-warrants-for-tamep
(implies (and (ilks-plist-worldp wrld)
(executable-tamep x wrld)
(symbol-listp unwarranteds))
(symbol-listp (mv-nth 1 (warrants-for-tamep
x wrld warrants unwarranteds))))
:flag term)
(defthm symbol-listp-mv-nth-1-warrants-for-tamep-functionp
(implies (and (ilks-plist-worldp wrld)
(executable-tamep-functionp fn wrld)
(symbol-listp unwarranteds))
(symbol-listp (mv-nth 1 (warrants-for-tamep-functionp
fn wrld warrants unwarranteds))))
:flag fn)
(defthm symbol-listp-mv-nth-1-warrants-for-warrants-for-suitably-tamep-listp
(implies (and (ilks-plist-worldp wrld)
(executable-suitably-tamep-listp flags args wrld)
(symbol-listp unwarranteds))
(symbol-listp (mv-nth 1 (warrants-for-suitably-tamep-listp
flags args wrld warrants unwarranteds))))
:flag lst)))
(verify-guards warrants-for-tamep)
(defthm weak-splo-extracts-tuple-listp-forward-to-true-listp
(implies (weak-splo-extracts-tuple-listp x)
(true-listp x))
:rule-classes :forward-chaining)
(defthm weak-splo-extracts-tuple-listp-append
(implies (true-listp x)
(equal (weak-splo-extracts-tuple-listp (append x y))
(and (weak-splo-extracts-tuple-listp x)
(weak-splo-extracts-tuple-listp y)))))
(verify-termination type-expressions-from-type-spec)
(verify-termination syntactically-plausible-lambda-objectp1
(declare (xargs :verify-guards nil)))
(verify-termination syntactically-plausible-lambda-objectp
(declare (xargs :verify-guards nil)))
(local
(make-flag flag-syntactically-plausible-lambda-objectp
syntactically-plausible-lambda-objectp
:flag-mapping
((syntactically-plausible-lambda-objectp main)
(syntactically-plausible-lambda-objectsp-within within)
(syntactically-plausible-lambda-objectsp-within-lst listp))))
(local
(defthm-flag-syntactically-plausible-lambda-objectp
(defthm weak-splo-extracts-tuple-listp-1
(implies (syntactically-plausible-lambda-objectp gflg x)
(weak-splo-extracts-tuple-listp
(syntactically-plausible-lambda-objectp gflg x)))
:flag main)
(defthm weak-splo-extracts-tuple-listp-2
(let ((ans (syntactically-plausible-lambda-objectsp-within gflg body)))
(implies ans
(or (weak-splo-extracts-tuple-listp ans)
(equal ans t))))
:rule-classes nil
:flag within)
(defthm weak-splo-extracts-tuple-listp-3
(let ((ans (syntactically-plausible-lambda-objectsp-within-lst gflg args)))
(implies ans
(or (weak-splo-extracts-tuple-listp ans)
(equal ans t))))
:rule-classes nil
:flag listp)))
(defthm weak-splo-extracts-tuple-listp-of-syntactically-plausible-lambda-objectp
(implies
(syntactically-plausible-lambda-objectp nil x)
(weak-splo-extracts-tuple-listp
(syntactically-plausible-lambda-objectp nil x))))
(verify-termination well-formed-lambda-objectp)
(verify-termination possibly-dirty-lambda-objectp)
(verify-guards possibly-dirty-lambda-objectp)
|
5a8372b5bd77b87f6eda680a97e0a6c8b5004bb94482d097a5884c1ac782ede3 | xapi-project/xen-api | lwt_support.mli |
* Copyright ( C ) Citrix Systems Inc.
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation ; version 2.1 only . with the special
* exception on linking described in file LICENSE .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU Lesser General Public License for more details .
* Copyright (C) Citrix Systems Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; version 2.1 only. with the special
* exception on linking described in file LICENSE.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*)
type 'a t = 'a Iteratees.Iteratee(Lwt).t =
| IE_done of 'a
| IE_cont of
Iteratees.err option
* (Iteratees.stream -> ('a t * Iteratees.stream) Lwt.t)
val really_write : Lwt_unix.file_descr -> string -> unit Lwt.t
(** Really write a string to a file descriptor - repeats until the whole
string is done *)
val lwt_fd_enumerator : Lwt_unix.file_descr -> 'a t -> 'a t Lwt.t
(** Read from an Lwt fd and send the chunks to an iteratee *)
val lwt_enumerator : string -> 'a t -> 'a t Lwt.t
(** Read from a named file and send the chunks to an iteratee *)
exception Host_not_found of string
val with_fd :
Lwt_unix.file_descr -> callback:(Lwt_unix.file_descr -> 'a Lwt.t) -> 'a Lwt.t
(** Given a file descriptor (Lwt), it executes the function
[callback] passing it the connected file descriptor, ensuring to close
the file descriptor when returning. *)
(** Given socket, open a connection and execute the function
[callback] passing it the connected file descriptor, ensuring to
close the file descriptor when returning. The function can fail
with Unix exceptions and Host_not_found. *)
val with_open_connection_fd :
Unix.sockaddr -> callback:(Lwt_unix.file_descr -> 'a Lwt.t) -> 'a Lwt.t
| null | https://raw.githubusercontent.com/xapi-project/xen-api/ade9c3eb5a64fdc06880ffae75c4de86d266de6a/ocaml/wsproxy/src/lwt_support.mli | ocaml | * Really write a string to a file descriptor - repeats until the whole
string is done
* Read from an Lwt fd and send the chunks to an iteratee
* Read from a named file and send the chunks to an iteratee
* Given a file descriptor (Lwt), it executes the function
[callback] passing it the connected file descriptor, ensuring to close
the file descriptor when returning.
* Given socket, open a connection and execute the function
[callback] passing it the connected file descriptor, ensuring to
close the file descriptor when returning. The function can fail
with Unix exceptions and Host_not_found. |
* Copyright ( C ) Citrix Systems Inc.
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation ; version 2.1 only . with the special
* exception on linking described in file LICENSE .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU Lesser General Public License for more details .
* Copyright (C) Citrix Systems Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; version 2.1 only. with the special
* exception on linking described in file LICENSE.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*)
type 'a t = 'a Iteratees.Iteratee(Lwt).t =
| IE_done of 'a
| IE_cont of
Iteratees.err option
* (Iteratees.stream -> ('a t * Iteratees.stream) Lwt.t)
val really_write : Lwt_unix.file_descr -> string -> unit Lwt.t
val lwt_fd_enumerator : Lwt_unix.file_descr -> 'a t -> 'a t Lwt.t
val lwt_enumerator : string -> 'a t -> 'a t Lwt.t
exception Host_not_found of string
val with_fd :
Lwt_unix.file_descr -> callback:(Lwt_unix.file_descr -> 'a Lwt.t) -> 'a Lwt.t
val with_open_connection_fd :
Unix.sockaddr -> callback:(Lwt_unix.file_descr -> 'a Lwt.t) -> 'a Lwt.t
|
fb26d41902982ffd2bc6627ff3f304916ae633571c45bf7889ff35fcdaf9f347 | PKUHelper/pkuipgw | locale.clj | (ns pkuipgw.locale
(:require [clojure.java.io :as io])
(:require [cheshire.core :as json]))
(def ^:private home-path (System/getProperty "user.home"))
(def ^:private config-path (str home-path "/.pkuipgwconfig"))
(defn- config-exist? [] (.exists (io/file config-path)))
(defn load-config []
(if (config-exist?)
(json/parse-string (slurp config-path) true)
{}))
(defn store-config [arg]
(let [config (into (load-config) (select-keys arg [:user-id :password]))
config-string (json/generate-string config)]
(spit config-path config-string)))
| null | https://raw.githubusercontent.com/PKUHelper/pkuipgw/fe602c892cf047ab8b95dce2b8bc4190a814c387/src/pkuipgw/locale.clj | clojure | (ns pkuipgw.locale
(:require [clojure.java.io :as io])
(:require [cheshire.core :as json]))
(def ^:private home-path (System/getProperty "user.home"))
(def ^:private config-path (str home-path "/.pkuipgwconfig"))
(defn- config-exist? [] (.exists (io/file config-path)))
(defn load-config []
(if (config-exist?)
(json/parse-string (slurp config-path) true)
{}))
(defn store-config [arg]
(let [config (into (load-config) (select-keys arg [:user-id :password]))
config-string (json/generate-string config)]
(spit config-path config-string)))
|
|
d1c8f97c0f0c4873d68ff05cc416af5959607335b23e009210ddf17c69ed2a40 | hlprmnky/ion-appsync-example | node_pages.clj | (ns ^{:doc "Generation of AWS Cloudformation templates which provide html pages running in Node.js Lambdas."}
cf.node-pages
(:require
[crucible.core :refer [template parameter resource output xref encode join sub region account-id]]
[crucible.aws.s3 :as s3]
[crucible.aws.iam :as iam]
[crucible.aws.lambda :as lambda]
[crucible.aws.api-gateway :as api-gw]
[crucible.policies :as policies]
[clojure.java.io :as io]
[clojure.data.json :as json]))
(def zip-file-path "pages/handlers.zip")
(defn add-page
"adds all the resources for an html page to be included in the stack"
[stack code-in-bucket? {:keys [page-name js-handler path]}]
(let [lambda-env {::lambda/variables {:cognito_pool (xref :CognitoPoolId)
:cognito_domain (xref :CognitoDomain)
:cognito_client_id (xref :CognitoClientId)
:cognito_client_secret (xref :CognitoClientSecret)}}
fake-code {::lambda/zip-file "foo/bar.zip"} ; used to create lambdas without real code loaded
zip-file (if code-in-bucket?
{::lambda/s3-bucket (xref :code-bucket)
::lambda/s3-key zip-file-path}
fake-code)
lambda-resource-key (keyword (str "lambda-page-" page-name))
api-resource-key (keyword (str "api-" page-name))]
(-> stack
(assoc lambda-resource-key
(lambda/function {::lambda/function-name (join [(xref :Application) "-page-" page-name])
::lambda/code zip-file
::lambda/handler (str "handlers." js-handler)
::lambda/role (xref :lamdba-role :arn)
::lambda/runtime "nodejs6.10"
::lambda/environment lambda-env}))
(assoc (keyword (str "permission-" page-name))
(lambda/permission {::lambda/action "lambda:invokeFunction"
::lambda/function-name (xref lambda-resource-key :arn)
::lambda/principal "apigateway.amazonaws.com"
::lambda/source-arn (join ["arn:aws:execute-api:"
region
":"
account-id
":"
(xref :api)
"/*"])}))
(assoc api-resource-key (api-gw/resource {:rest-api-id (xref :api)
:parent-id (xref :api :root-resource-id)
:path-part path}))
(assoc (keyword (str "api-get-" page-name))
(api-gw/method {:http-method "GET"
:resource-id (xref api-resource-key)
:rest-api-id (xref :api)
:authorization-type "NONE"
:integration {:type "AWS_PROXY"
:integration-http-method "POST" ; Important! Lambda must be invoked using a POST
:uri (join ["arn:aws:apigateway:"
region
":lambda:path/2015-03-31/functions/"
(xref lambda-resource-key :arn)
"/invocations"])}})))))
(defn stack-json
"return a CF template. code-loaded? indicates if the node.js code has been uploaded to the s3 bucket."
[code-loaded?]
(-> {:Application (parameter)
:CognitoPoolId (parameter)
:CognitoDomain (parameter)
:CognitoClientId (parameter)
:CognitoClientSecret (parameter)}
(assoc :code-bucket (s3/bucket {::s3/bucket-name (join [(xref :Application) "-nodejs-pages"])}))
(assoc :lamdba-role (iam/lambda-access-role))
(add-page code-loaded? {:page-name "home"
:js-handler "home"
:path "home"})
(add-page code-loaded? {:page-name "app"
:js-handler "app"
:path "app"})
; using join instead of sub because once fixed, then use (sub "${Application}-home-page")
(assoc :api (api-gw/rest-api {::api-gw/name (join [(xref :Application) "-html-pages"])
::api-gw/description "routes to invoke lamdbas that generate html pages"}))
(assoc :api-deployment (-> (api-gw/deployment {:rest-api-id (xref :api)})
deployment must wait for at least one method in the api . use a depends - on to ensure this
(assoc-in [1 :depends-on] [:api-get-home])))
(assoc :api-deployment-stage-dev (api-gw/stage {:rest-api-id (xref :api)
:deployment-id (xref :api-deployment)
:stage-name "dev"}))
(template "Lambdas and API Gateway resources to serve html pages (including login and logout flows)")))
; defined as a vars so that the Crucible lein task can be used to generate
(def create-stack (stack-json false))
(def update-stack (stack-json true))
(comment
(let [write (fn [template file]
(let [_ (io/make-parents file)
f (io/file file)]
(with-open [w (io/writer f)]
(json/write (json/read-str (encode template)) w))))]
(write create-stack "target/templates/cf/node-pages/create-stack.json")
(write update-stack "target/templates/cf/node-pages/update-stack.json"))) | null | https://raw.githubusercontent.com/hlprmnky/ion-appsync-example/76a3dcfcf2d09c066a8d4dac7f1e930f79fb1237/src-pages/cf/node_pages.clj | clojure | used to create lambdas without real code loaded
Important! Lambda must be invoked using a POST
using join instead of sub because once fixed, then use (sub "${Application}-home-page")
defined as a vars so that the Crucible lein task can be used to generate | (ns ^{:doc "Generation of AWS Cloudformation templates which provide html pages running in Node.js Lambdas."}
cf.node-pages
(:require
[crucible.core :refer [template parameter resource output xref encode join sub region account-id]]
[crucible.aws.s3 :as s3]
[crucible.aws.iam :as iam]
[crucible.aws.lambda :as lambda]
[crucible.aws.api-gateway :as api-gw]
[crucible.policies :as policies]
[clojure.java.io :as io]
[clojure.data.json :as json]))
(def zip-file-path "pages/handlers.zip")
(defn add-page
"adds all the resources for an html page to be included in the stack"
[stack code-in-bucket? {:keys [page-name js-handler path]}]
(let [lambda-env {::lambda/variables {:cognito_pool (xref :CognitoPoolId)
:cognito_domain (xref :CognitoDomain)
:cognito_client_id (xref :CognitoClientId)
:cognito_client_secret (xref :CognitoClientSecret)}}
zip-file (if code-in-bucket?
{::lambda/s3-bucket (xref :code-bucket)
::lambda/s3-key zip-file-path}
fake-code)
lambda-resource-key (keyword (str "lambda-page-" page-name))
api-resource-key (keyword (str "api-" page-name))]
(-> stack
(assoc lambda-resource-key
(lambda/function {::lambda/function-name (join [(xref :Application) "-page-" page-name])
::lambda/code zip-file
::lambda/handler (str "handlers." js-handler)
::lambda/role (xref :lamdba-role :arn)
::lambda/runtime "nodejs6.10"
::lambda/environment lambda-env}))
(assoc (keyword (str "permission-" page-name))
(lambda/permission {::lambda/action "lambda:invokeFunction"
::lambda/function-name (xref lambda-resource-key :arn)
::lambda/principal "apigateway.amazonaws.com"
::lambda/source-arn (join ["arn:aws:execute-api:"
region
":"
account-id
":"
(xref :api)
"/*"])}))
(assoc api-resource-key (api-gw/resource {:rest-api-id (xref :api)
:parent-id (xref :api :root-resource-id)
:path-part path}))
(assoc (keyword (str "api-get-" page-name))
(api-gw/method {:http-method "GET"
:resource-id (xref api-resource-key)
:rest-api-id (xref :api)
:authorization-type "NONE"
:integration {:type "AWS_PROXY"
:uri (join ["arn:aws:apigateway:"
region
":lambda:path/2015-03-31/functions/"
(xref lambda-resource-key :arn)
"/invocations"])}})))))
(defn stack-json
"return a CF template. code-loaded? indicates if the node.js code has been uploaded to the s3 bucket."
[code-loaded?]
(-> {:Application (parameter)
:CognitoPoolId (parameter)
:CognitoDomain (parameter)
:CognitoClientId (parameter)
:CognitoClientSecret (parameter)}
(assoc :code-bucket (s3/bucket {::s3/bucket-name (join [(xref :Application) "-nodejs-pages"])}))
(assoc :lamdba-role (iam/lambda-access-role))
(add-page code-loaded? {:page-name "home"
:js-handler "home"
:path "home"})
(add-page code-loaded? {:page-name "app"
:js-handler "app"
:path "app"})
(assoc :api (api-gw/rest-api {::api-gw/name (join [(xref :Application) "-html-pages"])
::api-gw/description "routes to invoke lamdbas that generate html pages"}))
(assoc :api-deployment (-> (api-gw/deployment {:rest-api-id (xref :api)})
deployment must wait for at least one method in the api . use a depends - on to ensure this
(assoc-in [1 :depends-on] [:api-get-home])))
(assoc :api-deployment-stage-dev (api-gw/stage {:rest-api-id (xref :api)
:deployment-id (xref :api-deployment)
:stage-name "dev"}))
(template "Lambdas and API Gateway resources to serve html pages (including login and logout flows)")))
(def create-stack (stack-json false))
(def update-stack (stack-json true))
(comment
(let [write (fn [template file]
(let [_ (io/make-parents file)
f (io/file file)]
(with-open [w (io/writer f)]
(json/write (json/read-str (encode template)) w))))]
(write create-stack "target/templates/cf/node-pages/create-stack.json")
(write update-stack "target/templates/cf/node-pages/update-stack.json"))) |
22c1d8081035bc4cf7cf867856661acdf850c0bc116a96a517b5d9b82933fdba | geophf/1HaskellADay | Exercise.hs | module Y2021.M04.D28.Exercise where
-
This is interesting
> > > Nothing > Just 70
False
> > > Nothing < Just 70
True
... huh . So Nothing is ... Just 0 ? That , for me , is n't good . I do n't want
something to succeed when I have only partial data .
me .
So , we have some functions that take two values , dependent on both , and
some other functions , that , to conform to the interface , takes two values ,
but only depends on the first value .
Why would I do that ?
Trending data and analytics . Some analytics depend on a trend , established
over a time series , and some analytics are realized from current data ( only ) .
Take indicators from the stock market , for example . Some indicators , like the
SMA50/200 ( Simple Moving Averages 50 ( days ) and 200 ( days ) ) require a cross-
over from the previous period to this period :
buy = yesterday ( sma50 < sma200 ) & & today ( sma50 )
Whereas the Relative Strength Index has a buy or sell signal dependent only
on a cross - over in this period
buy = today ( rsi14 < 30 )
sell = today ( rsi14 > 70 )
But you may not always have trending data , for example : from a new security .
Let 's do this .
You have a Time - series of trending data .
-
This is interesting
>>> Nothing > Just 70
False
>>> Nothing < Just 70
True
... huh. So Nothing is ... Just 0? That, for me, isn't good. I don't want
something to succeed when I have only partial data.
Pesky me.
So, we have some functions that take two values, dependent on both, and
some other functions, that, to conform to the interface, takes two values,
but only depends on the first value.
Why would I do that?
Trending data and analytics. Some analytics depend on a trend, established
over a time series, and some analytics are realized from current data (only).
Take indicators from the stock market, for example. Some indicators, like the
SMA50/200 (Simple Moving Averages 50 (days) and 200 (days)) require a cross-
over from the previous period to this period:
buy = yesterday (sma50 < sma200) && today (sma50 > sma200)
Whereas the Relative Strength Index has a buy or sell signal dependent only
on a cross-over in this period
buy = today (rsi14 < 30)
sell = today (rsi14 > 70)
But you may not always have trending data, for example: from a new security.
Let's do this.
You have a Time-series of trending data.
--}
import Data.Map (Map)
import Data.Set (Set)
data Trend = Trend { sma50, sma200, rsi14 :: Maybe Double }
deriving (Eq, Ord, Show)
-- note: you may have some, or all, trending data.
-- And you have function-type that takes trending data and returns a buy/sell
-- recommendation
data Call = BUY | SELL
deriving (Eq, Ord, Show)
data Basis = GoldenCross | DeathCross | RSI70 | RSI30
deriving (Eq, Ord, Show)
data Recommendation = Rec Call Basis
deriving (Eq, Ord, Show)
type RunRec = Trend -> Trend -> Maybe Recommendation
-
Today 's problem is a two - layered cake ( with icing , of course ):
First , the sma functions need both today 's and yesterday 's trends ( as described
above ) . The RSI functions need only today 's trends .
ANY function will fail if the indicator in the trend is not present .
From a time - series of trends , collect and return buy / sell recommendations .
DISCLAIMER ! Please note , all data provided are FAKE , MADE UP , INACCURATE .
A BUY or SELL recommendation here is NOT financial advice in any way , shape ,
or form , so ... THERE !
OKAY !
So , let 's pretend we have three companies . One that has one set of trending
data , and the other two that have a series of trending data , complete or
incomplete .
The timeseries goes into the past : the first element is today 's trends , the
second element is yesterday 's trends , third , day before 's , ... etc .
-
Today's Haskell problem is a two-layered cake (with icing, of course):
First, the sma functions need both today's and yesterday's trends (as described
above). The RSI functions need only today's trends.
ANY function will fail if the indicator in the trend is not present.
From a time-series of trends, collect and return buy/sell recommendations.
DISCLAIMER! Please note, all data provided are FAKE, MADE UP, INACCURATE.
A BUY or SELL recommendation here is NOT financial advice in any way, shape,
or form, so ... THERE!
OKAY!
So, let's pretend we have three companies. One that has one set of trending
data, and the other two that have a series of trending data, complete or
incomplete.
The timeseries goes into the past: the first element is today's trends, the
second element is yesterday's trends, third, day before's, ... etc.
--}
abc, xyz, pqr :: [Trend]
abc = [Trend (Just 7.4) Nothing (Just 55)]
xyz = [Trend (Just 10) (Just 20) (Just 27), Trend (Just 11) Nothing (Just 12)]
pqr = [Trend (Just 7) (Just 15) (Just 45), Trend (Just 17) (Just 14) (Just 59)]
-- Now we have recommendation functions
-- to set up, we need functions that compare data in the monadic domain
lt, gt :: Maybe Double -> Maybe Double -> Maybe Bool
lt = undefined -- 'less than'
gt = undefined -- 'greater than'
nd :: Maybe Bool -> Maybe Bool -> Maybe Bool
nd = undefined -- 'and' for monadic booleans
-- now, define implication:
(-|) :: Bool -> a -> Maybe a
p -| q = undefined -- if p then Just q else Nothing
-- now: using (-|) create (=|)
(=|) :: Maybe Bool -> a -> Maybe a
if p exists and p then q else Nothing
the ' Golden Cross ' says SMA50 crosses above the from yesterday to
today .
goldenCross :: RunRec
goldenCross today yesterday =
(sma50 today `gt` sma200 today)
`nd` (sma50 yesterday `lt` sma200 yesterday) =| Rec BUY GoldenCross
The ' Death Cross ' says the dual
deathCross :: RunRec
deathCross today yesterday =
(sma50 today `lt` sma200 today)
`nd` (sma50 yesterday `gt` sma200 yesterday) =| Rec SELL DeathCross
The RSI 14 cases :
rsi70, rsi30 :: RunRec
rsi70 today _ = rsi14 today `gt` Just 70 =| Rec SELL RSI70
rsi30 today _ = rsi14 today `lt` Just 30 =| Rec BUY RSI30
-- now the rsi-functions can be run straight off, but the cross-functions
-- need to be lifted:
type RunRec' = Trend -> Maybe Trend -> Maybe Recommendation
write a function to convert an RunRec - function to a RunRec ' one .
toRunRec' :: RunRec -> RunRec'
toRunRec' = undefined
-- run the functions against the companies and return a set of recommendations
-- (if any) for each company
type Company = String
runRecs :: [RunRec] -> [(Company, [Trend])] -> Map Company (Set Recommendation)
runRecs recs companies = undefined
recs :: [RunRec]
recs = [goldenCross, deathCross, rsi70, rsi30]
what are the Buy or Sell recommendations for abc , xyz , and pqr ?
-
> > > runRecs ( zip ( words " abc xyz pqr " ) [ abc , xyz , ] )
-
>>> runRecs recs (zip (words "abc xyz pqr") [abc, xyz, pqr])
--}
| null | https://raw.githubusercontent.com/geophf/1HaskellADay/613e0ec29b256f276fde29897e9b64c1a8f9ef73/exercises/HAD/Y2021/M04/D28/Exercise.hs | haskell | }
note: you may have some, or all, trending data.
And you have function-type that takes trending data and returns a buy/sell
recommendation
}
Now we have recommendation functions
to set up, we need functions that compare data in the monadic domain
'less than'
'greater than'
'and' for monadic booleans
now, define implication:
if p then Just q else Nothing
now: using (-|) create (=|)
now the rsi-functions can be run straight off, but the cross-functions
need to be lifted:
run the functions against the companies and return a set of recommendations
(if any) for each company
} | module Y2021.M04.D28.Exercise where
-
This is interesting
> > > Nothing > Just 70
False
> > > Nothing < Just 70
True
... huh . So Nothing is ... Just 0 ? That , for me , is n't good . I do n't want
something to succeed when I have only partial data .
me .
So , we have some functions that take two values , dependent on both , and
some other functions , that , to conform to the interface , takes two values ,
but only depends on the first value .
Why would I do that ?
Trending data and analytics . Some analytics depend on a trend , established
over a time series , and some analytics are realized from current data ( only ) .
Take indicators from the stock market , for example . Some indicators , like the
SMA50/200 ( Simple Moving Averages 50 ( days ) and 200 ( days ) ) require a cross-
over from the previous period to this period :
buy = yesterday ( sma50 < sma200 ) & & today ( sma50 )
Whereas the Relative Strength Index has a buy or sell signal dependent only
on a cross - over in this period
buy = today ( rsi14 < 30 )
sell = today ( rsi14 > 70 )
But you may not always have trending data , for example : from a new security .
Let 's do this .
You have a Time - series of trending data .
-
This is interesting
>>> Nothing > Just 70
False
>>> Nothing < Just 70
True
... huh. So Nothing is ... Just 0? That, for me, isn't good. I don't want
something to succeed when I have only partial data.
Pesky me.
So, we have some functions that take two values, dependent on both, and
some other functions, that, to conform to the interface, takes two values,
but only depends on the first value.
Why would I do that?
Trending data and analytics. Some analytics depend on a trend, established
over a time series, and some analytics are realized from current data (only).
Take indicators from the stock market, for example. Some indicators, like the
SMA50/200 (Simple Moving Averages 50 (days) and 200 (days)) require a cross-
over from the previous period to this period:
buy = yesterday (sma50 < sma200) && today (sma50 > sma200)
Whereas the Relative Strength Index has a buy or sell signal dependent only
on a cross-over in this period
buy = today (rsi14 < 30)
sell = today (rsi14 > 70)
But you may not always have trending data, for example: from a new security.
Let's do this.
You have a Time-series of trending data.
import Data.Map (Map)
import Data.Set (Set)
data Trend = Trend { sma50, sma200, rsi14 :: Maybe Double }
deriving (Eq, Ord, Show)
data Call = BUY | SELL
deriving (Eq, Ord, Show)
data Basis = GoldenCross | DeathCross | RSI70 | RSI30
deriving (Eq, Ord, Show)
data Recommendation = Rec Call Basis
deriving (Eq, Ord, Show)
type RunRec = Trend -> Trend -> Maybe Recommendation
-
Today 's problem is a two - layered cake ( with icing , of course ):
First , the sma functions need both today 's and yesterday 's trends ( as described
above ) . The RSI functions need only today 's trends .
ANY function will fail if the indicator in the trend is not present .
From a time - series of trends , collect and return buy / sell recommendations .
DISCLAIMER ! Please note , all data provided are FAKE , MADE UP , INACCURATE .
A BUY or SELL recommendation here is NOT financial advice in any way , shape ,
or form , so ... THERE !
OKAY !
So , let 's pretend we have three companies . One that has one set of trending
data , and the other two that have a series of trending data , complete or
incomplete .
The timeseries goes into the past : the first element is today 's trends , the
second element is yesterday 's trends , third , day before 's , ... etc .
-
Today's Haskell problem is a two-layered cake (with icing, of course):
First, the sma functions need both today's and yesterday's trends (as described
above). The RSI functions need only today's trends.
ANY function will fail if the indicator in the trend is not present.
From a time-series of trends, collect and return buy/sell recommendations.
DISCLAIMER! Please note, all data provided are FAKE, MADE UP, INACCURATE.
A BUY or SELL recommendation here is NOT financial advice in any way, shape,
or form, so ... THERE!
OKAY!
So, let's pretend we have three companies. One that has one set of trending
data, and the other two that have a series of trending data, complete or
incomplete.
The timeseries goes into the past: the first element is today's trends, the
second element is yesterday's trends, third, day before's, ... etc.
abc, xyz, pqr :: [Trend]
abc = [Trend (Just 7.4) Nothing (Just 55)]
xyz = [Trend (Just 10) (Just 20) (Just 27), Trend (Just 11) Nothing (Just 12)]
pqr = [Trend (Just 7) (Just 15) (Just 45), Trend (Just 17) (Just 14) (Just 59)]
lt, gt :: Maybe Double -> Maybe Double -> Maybe Bool
nd :: Maybe Bool -> Maybe Bool -> Maybe Bool
(-|) :: Bool -> a -> Maybe a
(=|) :: Maybe Bool -> a -> Maybe a
if p exists and p then q else Nothing
the ' Golden Cross ' says SMA50 crosses above the from yesterday to
today .
goldenCross :: RunRec
goldenCross today yesterday =
(sma50 today `gt` sma200 today)
`nd` (sma50 yesterday `lt` sma200 yesterday) =| Rec BUY GoldenCross
The ' Death Cross ' says the dual
deathCross :: RunRec
deathCross today yesterday =
(sma50 today `lt` sma200 today)
`nd` (sma50 yesterday `gt` sma200 yesterday) =| Rec SELL DeathCross
The RSI 14 cases :
rsi70, rsi30 :: RunRec
rsi70 today _ = rsi14 today `gt` Just 70 =| Rec SELL RSI70
rsi30 today _ = rsi14 today `lt` Just 30 =| Rec BUY RSI30
type RunRec' = Trend -> Maybe Trend -> Maybe Recommendation
write a function to convert an RunRec - function to a RunRec ' one .
toRunRec' :: RunRec -> RunRec'
toRunRec' = undefined
type Company = String
runRecs :: [RunRec] -> [(Company, [Trend])] -> Map Company (Set Recommendation)
runRecs recs companies = undefined
recs :: [RunRec]
recs = [goldenCross, deathCross, rsi70, rsi30]
what are the Buy or Sell recommendations for abc , xyz , and pqr ?
-
> > > runRecs ( zip ( words " abc xyz pqr " ) [ abc , xyz , ] )
-
>>> runRecs recs (zip (words "abc xyz pqr") [abc, xyz, pqr])
|
55fdd13f3f9cabf065e62fcbb8f444196808f33781997366a12568f6ec194726 | haskell-works/avro | Int64Spec.hs | # LANGUAGE DeriveGeneric #
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE QuasiQuotes #-}
# LANGUAGE StrictData #
# LANGUAGE TemplateHaskell #
module Avro.Codec.Int64Spec (spec) where
import Data.Avro.Internal.Zig (zig)
import Data.Bits
import Data.ByteString.Builder
import qualified Data.ByteString.Lazy as BL
import Data.Int
import Data.List.Extra
import Data.Word
import Numeric (showHex)
import Avro.TestUtils
import HaskellWorks.Hspec.Hedgehog
import Hedgehog
import qualified Hedgehog.Gen as Gen
import qualified Hedgehog.Range as Range
import Test.Hspec
import Data.Avro (encodeValueWithSchema)
import Data.Avro.Deriving (deriveAvroFromByteString, r)
import qualified Data.Avro.Schema.Schema as Schema
HLINT ignore " Redundant do "
deriveAvroFromByteString [r|
{
"type": "record",
"name": "OnlyInt64",
"namespace": "test.contract",
"fields": [ {"name": "onlyInt64Value", "type": "long"} ]
}
|]
bitStringToWord8s :: String -> [Word8]
bitStringToWord8s = reverse . map (toWord . reverse) . chunksOf 8 . reverse . toBinary
where toBinary :: String -> [Bool]
toBinary ('1':xs) = True : toBinary xs
toBinary ('0':xs) = False : toBinary xs
toBinary (_ :xs) = toBinary xs
toBinary [] = []
toWord' :: Word8 -> [Bool] -> Word8
toWord' n (True :bs) = toWord' ((n `shiftL` 1) .|. 1) bs
toWord' n (False:bs) = toWord' ((n `shiftL` 1) .|. 0) bs
toWord' n _ = n
toWord = toWord' 0
spec :: Spec
spec = describe "Avro.Codec.Int64Spec" $ do
let schema = Schema.Long Nothing
it "Can encode 90071992547409917L correctly" $ require $ withTests 1 $ property $ do
let expectedBuffer = BL.pack [0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x02]
let value = OnlyInt64 90071992547409917
encodeValueWithSchema schema value === expectedBuffer
it "Can decode 90071992547409917L correctly" $ require $ withTests 1 $ property $ do
let buffer = BL.pack [0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x02]
let value = OnlyInt64 90071992547409917
encodeValueWithSchema schema value === buffer
it "Can decode encoded Int64 values" $ require $ property $ do
roundtripGen schema (Gen.int64 Range.linearBounded)
it "Can decode 129L" $ require $ withTests 1 $ property $ do
let w = 129 :: Int64
w' <- evalEither $ roundtrip schema w
w === w'
it "Can decode 36028797018963968 correctly" $ require $ withTests 1 $ property $ do
let buffer = BL.pack (bitStringToWord8s "10000000 10000000 10000000 10000000 10000000 10000000 10000000 10000000 00000001")
let value = OnlyInt64 36028797018963968
encodeValueWithSchema schema value === buffer
it "bitStringToWord8s 00000000" $ bitStringToWord8s "00000000" `shouldBe` [0x00 ]
it "bitStringToWord8s 00000001" $ bitStringToWord8s "00000001" `shouldBe` [0x01 ]
it "bitStringToWord8s 01111111" $ bitStringToWord8s "01111111" `shouldBe` [0x7f ]
it "bitStringToWord8s 10000000 00000001" $ bitStringToWord8s "10000000 00000001" `shouldBe` [0x80, 0x01 ]
it "bitStringToWord8s 10000001 00000001" $ bitStringToWord8s "10000001 00000001" `shouldBe` [0x81, 0x01 ]
it "bitStringToWord8s 10000010 00000001" $ bitStringToWord8s "10000010 00000001" `shouldBe` [0x82, 0x01 ]
it "bitStringToWord8s 11111111 01111111" $ bitStringToWord8s "11111111 01111111" `shouldBe` [0xff, 0x7f ]
it "bitStringToWord8s 10000000 10000000 00000001" $ bitStringToWord8s "10000000 10000000 00000001" `shouldBe` [0x80, 0x80, 0x01 ]
it "bitStringToWord8s 10000001 10000000 00000001" $ bitStringToWord8s "10000001 10000000 00000001" `shouldBe` [0x81, 0x80, 0x01 ]
it "bitStringToWord8s 10000001 10000000 00000000" $ bitStringToWord8s "10000001 10000000 00000000" `shouldBe` [0x81, 0x80, 0x00 ]
it "Can zig" $ require $ withTests 1 $ property $ do
zig ( 0 :: Int64) === 0
zig ( -1 :: Int64) === 1
zig ( 1 :: Int64) === 2
zig ( -2 :: Int64) === 3
zig ( 2147483647 :: Int64) === 4294967294
zig (-2147483648 :: Int64) === 4294967295
| null | https://raw.githubusercontent.com/haskell-works/avro/9eb4970fd72939a68aa6f947d3d2dc17280352f8/test/Avro/Codec/Int64Spec.hs | haskell | # LANGUAGE OverloadedStrings #
# LANGUAGE QuasiQuotes # | # LANGUAGE DeriveGeneric #
# LANGUAGE StrictData #
# LANGUAGE TemplateHaskell #
module Avro.Codec.Int64Spec (spec) where
import Data.Avro.Internal.Zig (zig)
import Data.Bits
import Data.ByteString.Builder
import qualified Data.ByteString.Lazy as BL
import Data.Int
import Data.List.Extra
import Data.Word
import Numeric (showHex)
import Avro.TestUtils
import HaskellWorks.Hspec.Hedgehog
import Hedgehog
import qualified Hedgehog.Gen as Gen
import qualified Hedgehog.Range as Range
import Test.Hspec
import Data.Avro (encodeValueWithSchema)
import Data.Avro.Deriving (deriveAvroFromByteString, r)
import qualified Data.Avro.Schema.Schema as Schema
HLINT ignore " Redundant do "
deriveAvroFromByteString [r|
{
"type": "record",
"name": "OnlyInt64",
"namespace": "test.contract",
"fields": [ {"name": "onlyInt64Value", "type": "long"} ]
}
|]
bitStringToWord8s :: String -> [Word8]
bitStringToWord8s = reverse . map (toWord . reverse) . chunksOf 8 . reverse . toBinary
where toBinary :: String -> [Bool]
toBinary ('1':xs) = True : toBinary xs
toBinary ('0':xs) = False : toBinary xs
toBinary (_ :xs) = toBinary xs
toBinary [] = []
toWord' :: Word8 -> [Bool] -> Word8
toWord' n (True :bs) = toWord' ((n `shiftL` 1) .|. 1) bs
toWord' n (False:bs) = toWord' ((n `shiftL` 1) .|. 0) bs
toWord' n _ = n
toWord = toWord' 0
spec :: Spec
spec = describe "Avro.Codec.Int64Spec" $ do
let schema = Schema.Long Nothing
it "Can encode 90071992547409917L correctly" $ require $ withTests 1 $ property $ do
let expectedBuffer = BL.pack [0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x02]
let value = OnlyInt64 90071992547409917
encodeValueWithSchema schema value === expectedBuffer
it "Can decode 90071992547409917L correctly" $ require $ withTests 1 $ property $ do
let buffer = BL.pack [0xfa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xbf, 0x02]
let value = OnlyInt64 90071992547409917
encodeValueWithSchema schema value === buffer
it "Can decode encoded Int64 values" $ require $ property $ do
roundtripGen schema (Gen.int64 Range.linearBounded)
it "Can decode 129L" $ require $ withTests 1 $ property $ do
let w = 129 :: Int64
w' <- evalEither $ roundtrip schema w
w === w'
it "Can decode 36028797018963968 correctly" $ require $ withTests 1 $ property $ do
let buffer = BL.pack (bitStringToWord8s "10000000 10000000 10000000 10000000 10000000 10000000 10000000 10000000 00000001")
let value = OnlyInt64 36028797018963968
encodeValueWithSchema schema value === buffer
it "bitStringToWord8s 00000000" $ bitStringToWord8s "00000000" `shouldBe` [0x00 ]
it "bitStringToWord8s 00000001" $ bitStringToWord8s "00000001" `shouldBe` [0x01 ]
it "bitStringToWord8s 01111111" $ bitStringToWord8s "01111111" `shouldBe` [0x7f ]
it "bitStringToWord8s 10000000 00000001" $ bitStringToWord8s "10000000 00000001" `shouldBe` [0x80, 0x01 ]
it "bitStringToWord8s 10000001 00000001" $ bitStringToWord8s "10000001 00000001" `shouldBe` [0x81, 0x01 ]
it "bitStringToWord8s 10000010 00000001" $ bitStringToWord8s "10000010 00000001" `shouldBe` [0x82, 0x01 ]
it "bitStringToWord8s 11111111 01111111" $ bitStringToWord8s "11111111 01111111" `shouldBe` [0xff, 0x7f ]
it "bitStringToWord8s 10000000 10000000 00000001" $ bitStringToWord8s "10000000 10000000 00000001" `shouldBe` [0x80, 0x80, 0x01 ]
it "bitStringToWord8s 10000001 10000000 00000001" $ bitStringToWord8s "10000001 10000000 00000001" `shouldBe` [0x81, 0x80, 0x01 ]
it "bitStringToWord8s 10000001 10000000 00000000" $ bitStringToWord8s "10000001 10000000 00000000" `shouldBe` [0x81, 0x80, 0x00 ]
it "Can zig" $ require $ withTests 1 $ property $ do
zig ( 0 :: Int64) === 0
zig ( -1 :: Int64) === 1
zig ( 1 :: Int64) === 2
zig ( -2 :: Int64) === 3
zig ( 2147483647 :: Int64) === 4294967294
zig (-2147483648 :: Int64) === 4294967295
|
fcb22f0326047619bf34137801e74cc733aabe22a7c24ff9168e3b408b25f0f7 | agrafix/funblog | Post.hs | {-# LANGUAGE OverloadedStrings #-}
module Web.Forms.Post where
import Model.CoreTypes
import Data.Time
import Text.Blaze.Html (Html)
import Text.Digestive hiding (Post)
import Text.Digestive.Bootstrap
postForm :: Monad m => UTCTime -> Form Html m Post
postForm now =
Post <$> "title" .: text Nothing
<*> "date" .: stringRead "Couldn't parse as UTCTime" (Just now)
<*> "content" .: text Nothing
postFormSpec :: FormMeta
postFormSpec =
FormMeta
{ fm_method = POST
, fm_target = "/write"
, fm_components =
[ FCSection
FormSection
{ fs_title = Nothing
, fs_help = Nothing
, fs_elements =
[ FormElement "title" (Just "Title") (Just "Title") InputText
, FormElement "date" (Just "Date") Nothing InputText
, FormElement "content" (Just "Content") (Just "Content") $ InputTextArea (Just 30) (Just 10)
]
}
]
, fm_submitValue = "Publish"
}
| null | https://raw.githubusercontent.com/agrafix/funblog/d401fb61aef9a0d8310b767f44893ee7b13015c7/src/Web/Forms/Post.hs | haskell | # LANGUAGE OverloadedStrings # | module Web.Forms.Post where
import Model.CoreTypes
import Data.Time
import Text.Blaze.Html (Html)
import Text.Digestive hiding (Post)
import Text.Digestive.Bootstrap
postForm :: Monad m => UTCTime -> Form Html m Post
postForm now =
Post <$> "title" .: text Nothing
<*> "date" .: stringRead "Couldn't parse as UTCTime" (Just now)
<*> "content" .: text Nothing
postFormSpec :: FormMeta
postFormSpec =
FormMeta
{ fm_method = POST
, fm_target = "/write"
, fm_components =
[ FCSection
FormSection
{ fs_title = Nothing
, fs_help = Nothing
, fs_elements =
[ FormElement "title" (Just "Title") (Just "Title") InputText
, FormElement "date" (Just "Date") Nothing InputText
, FormElement "content" (Just "Content") (Just "Content") $ InputTextArea (Just 30) (Just 10)
]
}
]
, fm_submitValue = "Publish"
}
|
fb735bf3d456bd2e73ebdc4ccac90f55c7b91f40524a00edf9a51b779e106026 | rescript-lang/rescript-compiler | ext_string.ml | Copyright ( C ) 2015 - 2016 Bloomberg Finance L.P.
* Copyright ( C ) 2017 - , Authors of ReScript
* This program is free software : you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation , either version 3 of the License , or
* ( at your option ) any later version .
*
* In addition to the permissions granted to you by the LGPL , you may combine
* or link a " work that uses the Library " with a publicly distributed version
* of this file to produce a combined library or application , then distribute
* that combined work under the terms of your choosing , with no requirement
* to comply with the obligations normally placed on you by section 4 of the
* LGPL version 3 ( or the corresponding section of a later version of the LGPL
* should you choose to use a later version ) .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU Lesser General Public License for more details .
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc. , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 , USA .
* Copyright (C) 2017 - Hongbo Zhang, Authors of ReScript
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In addition to the permissions granted to you by the LGPL, you may combine
* or link a "work that uses the Library" with a publicly distributed version
* of this file to produce a combined library or application, then distribute
* that combined work under the terms of your choosing, with no requirement
* to comply with the obligations normally placed on you by section 4 of the
* LGPL version 3 (or the corresponding section of a later version of the LGPL
* should you choose to use a later version).
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *)
(*
{[ split " test_unsafe_obj_ffi_ppx.cmi" ~keep_empty:false ' ']}
*)
let split_by ?(keep_empty=false) is_delim str =
let len = String.length str in
let rec loop acc last_pos pos =
if pos = -1 then
if last_pos = 0 && not keep_empty then
acc
else
String.sub str 0 last_pos :: acc
else
if is_delim str.[pos] then
let new_len = (last_pos - pos - 1) in
if new_len <> 0 || keep_empty then
let v = String.sub str (pos + 1) new_len in
loop ( v :: acc)
pos (pos - 1)
else loop acc pos (pos - 1)
else loop acc last_pos (pos - 1)
in
loop [] len (len - 1)
let trim s =
let i = ref 0 in
let j = String.length s in
while !i < j &&
let u = String.unsafe_get s !i in
u = '\t' || u = '\n' || u = ' '
do
incr i;
done;
let k = ref (j - 1) in
while !k >= !i &&
let u = String.unsafe_get s !k in
u = '\t' || u = '\n' || u = ' ' do
decr k ;
done;
String.sub s !i (!k - !i + 1)
let split ?keep_empty str on =
if str = "" then [] else
split_by ?keep_empty (fun x -> (x : char) = on) str ;;
let quick_split_by_ws str : string list =
split_by ~keep_empty:false (fun x -> x = '\t' || x = '\n' || x = ' ') str
let starts_with s beg =
let beg_len = String.length beg in
let s_len = String.length s in
beg_len <= s_len &&
(let i = ref 0 in
while !i < beg_len
&& String.unsafe_get s !i =
String.unsafe_get beg !i do
incr i
done;
!i = beg_len
)
let rec ends_aux s end_ j k =
if k < 0 then (j + 1)
else if String.unsafe_get s j = String.unsafe_get end_ k then
ends_aux s end_ (j - 1) (k - 1)
else -1
(** return an index which is minus when [s] does not
end with [beg]
*)
let ends_with_index s end_ : int =
let s_finish = String.length s - 1 in
let s_beg = String.length end_ - 1 in
if s_beg > s_finish then -1
else
ends_aux s end_ s_finish s_beg
let ends_with s end_ = ends_with_index s end_ >= 0
let ends_with_then_chop s beg =
let i = ends_with_index s beg in
if i >= 0 then Some (String.sub s 0 i)
else None
(* let check_suffix_case = ends_with *)
let check_suffix_case_then_chop = ends_with_then_chop
let check_any_suffix_case s suffixes =
Ext_list.exists suffixes ( fun x - > check_suffix_case s x )
Ext_list.exists suffixes (fun x -> check_suffix_case s x) *)
(* let check_any_suffix_case_then_chop s suffixes =
     let rec aux suffixes =
       match suffixes with
       | [] -> None
       | x::xs ->
         let id = ends_with_index s x in
         if id >= 0 then Some (String.sub s 0 id)
         else aux xs in
     aux suffixes *)
(* it is unsafe to expose such API as unsafe since
user can provide bad input range
*)
(* [p] holds for every character of [s] in the inclusive index range
   [start..finish].  Bounds are not checked (hence "unsafe"); an empty
   range ([start > finish]) is vacuously true. *)
let rec unsafe_for_all_range s ~start ~finish p =
  if start > finish then true
  else if p (String.unsafe_get s start) then
    unsafe_for_all_range s ~start:(start + 1) ~finish p
  else false
(* [p] holds for every character of [s] from index [start] (inclusive)
   to the end.  Raises [Invalid_argument] on a negative [start]; a
   [start] past the end yields [true] vacuously. *)
let for_all_from s start p =
  let len = String.length s in
  if start < 0 then invalid_arg "Ext_string.for_all_from"
  else unsafe_for_all_range s ~start ~finish:(len - 1) p
(* [p] holds for every character of [s]. *)
let for_all s (p : char -> bool) =
  unsafe_for_all_range s ~start:0 ~finish:(String.length s - 1) p
let is_empty s = String.length s = 0
(* Concatenate [n] copies of [s].  [n = 0] yields "".  A negative [n]
   with a non-empty [s] raises [Invalid_argument] via [Bytes.create]. *)
let repeat n s =
  let len = String.length s in
  let buf = Bytes.create (n * len) in
  for i = 0 to n - 1 do
    Bytes.blit_string s 0 buf (i * len) len
  done;
  Bytes.to_string buf
(* Whether the [len] characters of [sub] starting at [i] equal the
   [len] characters of [s] starting at [j].  [i]/[len] must be valid
   for [sub] (unchecked); [j + len] is range-checked against [s]. *)
let unsafe_is_sub ~sub i s j ~len =
  if j + len > String.length s then false
  else
    let rec same k =
      k = len
      || (String.unsafe_get sub (i + k) = String.unsafe_get s (j + k)
          && same (k + 1))
    in
    same 0
(* Index of the first occurrence of [sub] in [s] at or after [start],
   or -1 when absent. *)
let find ?(start=0) ~sub s =
  let n = String.length sub in
  let s_len = String.length s in
  let rec search i =
    if i + n > s_len then -1
    else if unsafe_is_sub ~sub 0 s i ~len:n then i
    else search (i + 1)
  in
  search start
(* True iff [sub] occurs somewhere in [s]. *)
let contain_substring s sub =
  find s ~sub >= 0
(** TODO: optimize
avoid nonterminating when string is empty
*)
(* Count non-overlapping occurrences of [sub] in [s].  Raises
   [Invalid_argument] on an empty [sub], which would otherwise loop
   forever. *)
let non_overlap_count ~sub s =
  let sub_len = String.length sub in
  let rec aux acc off =
    (* resume searching just past the previous match *)
    let i = find ~start:off ~sub s in
    if i < 0 then acc
    else aux (acc + 1) (i + sub_len) in
  if String.length sub = 0 then invalid_arg "Ext_string.non_overlap_count"
  else aux 0 0
(* Index of the last occurrence of [sub] in [s], or -1 when absent. *)
let rfind ~sub s =
  let n = String.length sub in
  let rec search i =
    if i < 0 then -1
    else if unsafe_is_sub ~sub 0 s i ~len:n then i
    else search (i - 1)
  in
  search (String.length s - n)
(* Suffix of [s] starting at index [x].  Raises [Invalid_argument] when
   [x] is past the end of the string. *)
let tail_from s x =
  let len = String.length s in
  if x > len then
    invalid_arg ("Ext_string.tail_from " ^ s ^ " : " ^ string_of_int x)
  else String.sub s x (len - x)
let equal (x : string) y = x = y
(* let rec index_rec s lim i c =
     if i >= lim then -1 else
     if String.unsafe_get s i = c then i
     else index_rec s lim (i + 1) c *)
(* Index of the [count]-th occurrence of [c] in [s.[i .. lim-1]], or -1
   when there are fewer than [count] occurrences. *)
let rec index_rec_count s lim i c count =
  if i >= lim then -1
  else if String.unsafe_get s i <> c then index_rec_count s lim (i + 1) c count
  else if count = 1 then i
  else index_rec_count s lim (i + 1) c (count - 1)
(* Index of the [count]-th occurrence of [c] in [s] at or after [i],
   or -1.  Raises [Invalid_argument] on an out-of-range [i] or a
   [count] below 1. *)
let index_count s i c count =
  let lim = String.length s in
  if i < 0 || i >= lim || count < 1 then
    invalid_arg
      ("index_count: ( " ^ string_of_int i ^ "," ^ string_of_int count ^ ")");
  index_rec_count s lim i c count
(* let index_next s i c =
     index_count s i c 1 *)
(* let extract_until s cursor c =
     let len = String.length s in
     let start = !cursor in
     if start < 0 || start >= len then (
       cursor := -1;
       ""
     )
     else
       let i = index_rec s len start c in
       let finish =
         if i < 0 then (
           cursor := -1 ;
           len
         )
         else (
           cursor := i + 1;
           i
         ) in
       String.sub s start (finish - start) *)
(* Index of the last occurrence of [c] in [s.[0 .. i]]; when absent the
   scan falls off the left end and the (negative) index is returned. *)
let rec rindex_rec s i c =
  if i < 0 || String.unsafe_get s i = c then i
  else rindex_rec s (i - 1) c
(* Like [rindex_rec], but returns [None] instead of a negative index. *)
let rec rindex_rec_opt s i c =
  if i < 0 then None else
  if String.unsafe_get s i = c then Some i else rindex_rec_opt s (i - 1) c;;
(* Last index of [c] in [s], or a negative number when absent. *)
let rindex_neg s c =
  rindex_rec s (String.length s - 1) c;;
(* Last index of [c] in [s] as an option. *)
let rindex_opt s c =
  rindex_rec_opt s (String.length s - 1) c;;
(** TODO: can be improved to return a positive integer instead *)
(* [ch] does not occur in [x.[i .. last_idx]]; bounds are unchecked and
   an empty range is vacuously true. *)
let rec unsafe_no_char x ch i last_idx =
  if i > last_idx then true
  else String.unsafe_get x i <> ch && unsafe_no_char x ch (i + 1) last_idx
(* Index of the first occurrence of [ch] in [x.[i .. last_idx]], or -1
   when absent; bounds are unchecked. *)
let rec unsafe_no_char_idx x ch i last_idx =
  if i > last_idx then -1
  else if String.unsafe_get x i = ch then i
  else unsafe_no_char_idx x ch (i + 1) last_idx
(* Whether [ch] is absent from [x.[i .. len]].  NOTE: despite the name,
   [len] here is the inclusive last index to scan, not a length.
   Raises [Invalid_argument] when [i] or [len] is out of range. *)
let no_char x ch i len : bool =
  let str_len = String.length x in
  if i < 0 || i >= str_len || len >= str_len then invalid_arg "Ext_string.no_char"
  else unsafe_no_char x ch i len
(* [x] contains no '/' character. *)
let no_slash x =
  unsafe_no_char x '/' 0 (String.length x - 1)
(* Index of the first '/' in [x], or -1 when absent. *)
let no_slash_idx x =
  unsafe_no_char_idx x '/' 0 (String.length x - 1)
(* Index of the first '/' in [x] at or after [from], or -1. *)
let no_slash_idx_from x from =
  let last_idx = String.length x - 1 in
  assert (from >= 0);
  unsafe_no_char_idx x '/' from last_idx
(* Replace every '/' with '\\'; returns [x] itself when it contains no
   slash (avoids an allocation on the common case). *)
let replace_slash_backward (x : string ) =
  let len = String.length x in
  if unsafe_no_char x '/' 0 (len - 1) then x
  else String.map (fun c -> if c = '/' then '\\' else c) x
(* Replace every '\\' with '/'; returns [x] itself when it contains no
   backslash (avoids an allocation on the common case). *)
let replace_backward_slash (x : string)=
  let len = String.length x in
  if unsafe_no_char x '\\' 0 (len - 1) then x
  else String.map (fun c -> if c = '\\' then '/' else c) x
(* The shared empty string. *)
let empty = ""
#ifdef BROWSER
(* Browser build: use the JS stub implementation. *)
let compare = Bs_hash_stubs.string_length_based_compare
#else
(* Compare by length first, then by content: a cheap total order that is
   NOT lexicographic. *)
external compare : string -> string -> int = "caml_string_length_based_compare" [@@noalloc];;
#endif
(* Frequently used separator literals. *)
let single_space = " "
let single_colon = ":"
(* Join the strings of [s] with separator [sep]: a [String.concat]
   specialized for arrays that writes into one pre-sized buffer. *)
let concat_array sep (s : string array) =
  let s_len = Array.length s in
  match s_len with
  | 0 -> empty
  | 1 -> Array.unsafe_get s 0
  | _ ->
    let sep_len = String.length sep in
    let len = ref 0 in
    (* total payload length, not counting separators *)
    for i = 0 to s_len - 1 do
      len := !len + String.length (Array.unsafe_get s i)
    done;
    let target =
      Bytes.create
        (!len + (s_len - 1) * sep_len ) in
    let hd = (Array.unsafe_get s 0) in
    let hd_len = String.length hd in
    String.unsafe_blit hd 0 target 0 hd_len;
    let current_offset = ref hd_len in
    (* append separator then element for each remaining entry *)
    for i = 1 to s_len - 1 do
      String.unsafe_blit sep 0 target !current_offset sep_len;
      let cur = Array.unsafe_get s i in
      let cur_len = String.length cur in
      let new_off_set = (!current_offset + sep_len ) in
      String.unsafe_blit cur 0 target new_off_set cur_len;
      current_offset :=
        new_off_set + cur_len ;
    done;
    Bytes.unsafe_to_string target
(* [a ^ b ^ c] built with a single allocation. *)
let concat3 a b c =
  let la = String.length a in
  let lb = String.length b in
  let lc = String.length c in
  let buf = Bytes.create (la + lb + lc) in
  Bytes.blit_string a 0 buf 0 la;
  Bytes.blit_string b 0 buf la lb;
  Bytes.blit_string c 0 buf (la + lb) lc;
  Bytes.unsafe_to_string buf
(* [a ^ b ^ c ^ d] built with a single allocation. *)
let concat4 a b c d =
  let la = String.length a in
  let lb = String.length b in
  let lc = String.length c in
  let ld = String.length d in
  let buf = Bytes.create (la + lb + lc + ld) in
  Bytes.blit_string a 0 buf 0 la;
  Bytes.blit_string b 0 buf la lb;
  Bytes.blit_string c 0 buf (la + lb) lc;
  Bytes.blit_string d 0 buf (la + lb + lc) ld;
  Bytes.unsafe_to_string buf
(* [a ^ b ^ c ^ d ^ e] built with a single allocation. *)
let concat5 a b c d e =
  let la = String.length a in
  let lb = String.length b in
  let lc = String.length c in
  let ld = String.length d in
  let le = String.length e in
  let buf = Bytes.create (la + lb + lc + ld + le) in
  Bytes.blit_string a 0 buf 0 la;
  Bytes.blit_string b 0 buf la lb;
  Bytes.blit_string c 0 buf (la + lb) lc;
  Bytes.blit_string d 0 buf (la + lb + lc) ld;
  Bytes.blit_string e 0 buf (la + lb + lc + ld) le;
  Bytes.unsafe_to_string buf
(* Join with single spaces: ["a b"]. *)
let inter2 a b =
  concat3 a single_space b
(* ["a b c"]. *)
let inter3 a b c =
  concat5 a single_space b single_space c
(* ["a b c d"]. *)
let inter4 a b c d =
  concat_array single_space [| a; b ; c; d|]
(* Path-component literals. *)
let parent_dir_lit = ".."
let current_dir_lit = "."
(* reference {!Bytes.uppercase} *)
(* Uppercase the first character of [s] when it is a lowercase ASCII or
   Latin-1 letter; otherwise return [s] unchanged (no allocation). *)
let capitalize_ascii (s : string) : string =
  match s with
  | "" -> s
  | _ ->
    let first = String.unsafe_get s 0 in
    let lowercase =
      (first >= 'a' && first <= 'z')
      || (first >= '\224' && first <= '\246')
      || (first >= '\248' && first <= '\254')
    in
    if not lowercase then s
    else begin
      let buf = Bytes.of_string s in
      (* Latin-1 uppercase = lowercase - 32 *)
      Bytes.unsafe_set buf 0 (Char.unsafe_chr (Char.code first - 32));
      Bytes.unsafe_to_string buf
    end
(* First [len] characters of [s] with the initial character uppercased
   (same letter ranges as [capitalize_ascii]).  Raises
   [Invalid_argument] when [len] is negative or exceeds [s]'s length. *)
let capitalize_sub (s : string) len : string =
  let slen = String.length s in
  if len < 0 || len > slen then invalid_arg "Ext_string.capitalize_sub"
  else if len = 0 then ""
  else begin
    let buf = Bytes.create len in
    Bytes.blit_string s 0 buf 0 len;
    let c = Bytes.unsafe_get buf 0 in
    if (c >= 'a' && c <= 'z')
       || (c >= '\224' && c <= '\246')
       || (c >= '\248' && c <= '\254')
    then Bytes.unsafe_set buf 0 (Char.unsafe_chr (Char.code c - 32));
    Bytes.unsafe_to_string buf
  end
(* Re-exports of the stdlib's ASCII-only case conversions. *)
let uncapitalize_ascii =
  String.uncapitalize_ascii
let lowercase_ascii = String.lowercase_ascii
(* [s.![i]] reads byte [i] of [s] as an int, with no bounds check. *)
external (.![]) : string -> int -> int = "%string_unsafe_get"
(* Read 1..4 bytes at [off] as a little-endian unsigned integer.
   Offsets are not bounds-checked. *)
let get_int_1_unsafe (x : string) off : int =
  x.![off]
let get_int_2_unsafe (x : string) off : int =
  x.![off] lor
  x.![off+1] lsl 8
let get_int_3_unsafe (x : string) off : int =
  x.![off] lor
  x.![off+1] lsl 8 lor
  x.![off+2] lsl 16
let get_int_4_unsafe (x : string) off : int =
  x.![off] lor
  x.![off+1] lsl 8 lor
  x.![off+2] lsl 16 lor
  x.![off+3] lsl 24
(* Dispatch on [len] in {1,2,3,4}; any other value is a caller bug. *)
let get_1_2_3_4 (x : string) ~off len : int =
  if len = 1 then get_int_1_unsafe x off
  else if len = 2 then get_int_2_unsafe x off
  else if len = 3 then get_int_3_unsafe x off
  else if len = 4 then get_int_4_unsafe x off
  else assert false
(* Copy [len] bytes of [x] starting at [offs] into a fresh string.
   Bounds are the caller's responsibility (hence "unsafe"). *)
let unsafe_sub x offs len =
  let b = Bytes.create len in
  Ext_bytes.unsafe_blit_string x offs b 0 len;
  (Bytes.unsafe_to_string b)
(* [x] is a non-empty decimal numeral with no leading zero (a lone
   digit, including "0", is accepted).  48 = '0', 57 = '9'. *)
let is_valid_hash_number (x:string) =
  let len = String.length x in
  len > 0 && (
    let a = x.![0] in
    a <= 57 &&
    (if len > 1 then
      (* multi-digit: first digit must be 1-9, the rest 0-9 *)
      a > 48 &&
      for_all_from x 1 (function '0' .. '9' -> true | _ -> false)
    else
      a >= 48 )
  )
(* Parse [x] (expected to pass [is_valid_hash_number]) as an int32;
   raises [Failure] on malformed input or overflow. *)
let hash_number_as_i32_exn
  ( x : string) : int32 =
  Int32.of_string x
(* Whether [x] begins with '\132' (0x84), the first byte of OCaml's
   Marshal header. *)
let first_marshal_char (x : string) =
  String.length x > 0 && String.unsafe_get x 0 = '\132'
| null | https://raw.githubusercontent.com/rescript-lang/rescript-compiler/81a3dc63ca387b2af23fed297db283254ae3ab20/jscomp/ext/ext_string.ml | ocaml |
{[ split " test_unsafe_obj_ffi_ppx.cmi" ~keep_empty:false ' ']}
* return an index which is minus when [s] does not
end with [beg]
let check_suffix_case = ends_with
it is unsafe to expose such API as unsafe since
user can provide bad input range
* TODO: optimize
avoid nonterminating when string is empty
* TODO: can be improved to return a positive integer instead | Copyright ( C ) 2015 - 2016 Bloomberg Finance L.P.
* Copyright ( C ) 2017 - , Authors of ReScript
* This program is free software : you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation , either version 3 of the License , or
* ( at your option ) any later version .
*
* In addition to the permissions granted to you by the LGPL , you may combine
* or link a " work that uses the Library " with a publicly distributed version
* of this file to produce a combined library or application , then distribute
* that combined work under the terms of your choosing , with no requirement
* to comply with the obligations normally placed on you by section 4 of the
* LGPL version 3 ( or the corresponding section of a later version of the LGPL
* should you choose to use a later version ) .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU Lesser General Public License for more details .
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc. , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 , USA .
* Copyright (C) 2017 - Hongbo Zhang, Authors of ReScript
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In addition to the permissions granted to you by the LGPL, you may combine
* or link a "work that uses the Library" with a publicly distributed version
* of this file to produce a combined library or application, then distribute
* that combined work under the terms of your choosing, with no requirement
* to comply with the obligations normally placed on you by section 4 of the
* LGPL version 3 (or the corresponding section of a later version of the LGPL
* should you choose to use a later version).
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *)
let split_by ?(keep_empty=false) is_delim str =
let len = String.length str in
let rec loop acc last_pos pos =
if pos = -1 then
if last_pos = 0 && not keep_empty then
acc
else
String.sub str 0 last_pos :: acc
else
if is_delim str.[pos] then
let new_len = (last_pos - pos - 1) in
if new_len <> 0 || keep_empty then
let v = String.sub str (pos + 1) new_len in
loop ( v :: acc)
pos (pos - 1)
else loop acc pos (pos - 1)
else loop acc last_pos (pos - 1)
in
loop [] len (len - 1)
let trim s =
let i = ref 0 in
let j = String.length s in
while !i < j &&
let u = String.unsafe_get s !i in
u = '\t' || u = '\n' || u = ' '
do
incr i;
done;
let k = ref (j - 1) in
while !k >= !i &&
let u = String.unsafe_get s !k in
u = '\t' || u = '\n' || u = ' ' do
decr k ;
done;
String.sub s !i (!k - !i + 1)
let split ?keep_empty str on =
if str = "" then [] else
split_by ?keep_empty (fun x -> (x : char) = on) str ;;
let quick_split_by_ws str : string list =
split_by ~keep_empty:false (fun x -> x = '\t' || x = '\n' || x = ' ') str
let starts_with s beg =
let beg_len = String.length beg in
let s_len = String.length s in
beg_len <= s_len &&
(let i = ref 0 in
while !i < beg_len
&& String.unsafe_get s !i =
String.unsafe_get beg !i do
incr i
done;
!i = beg_len
)
let rec ends_aux s end_ j k =
if k < 0 then (j + 1)
else if String.unsafe_get s j = String.unsafe_get end_ k then
ends_aux s end_ (j - 1) (k - 1)
else -1
let ends_with_index s end_ : int =
let s_finish = String.length s - 1 in
let s_beg = String.length end_ - 1 in
if s_beg > s_finish then -1
else
ends_aux s end_ s_finish s_beg
let ends_with s end_ = ends_with_index s end_ >= 0
let ends_with_then_chop s beg =
let i = ends_with_index s beg in
if i >= 0 then Some (String.sub s 0 i)
else None
let check_suffix_case_then_chop = ends_with_then_chop
let check_any_suffix_case s suffixes =
Ext_list.exists suffixes ( fun x - > check_suffix_case s x )
Ext_list.exists suffixes (fun x -> check_suffix_case s x) *)
let s suffixes =
let rec aux suffixes =
match suffixes with
| [ ] - > None
| x::xs - >
let i d = ends_with_index s x in
if i d > = 0 then Some ( String.sub s 0 i d )
else aux xs in
aux suffixes
let rec aux suffixes =
match suffixes with
| [] -> None
| x::xs ->
let id = ends_with_index s x in
if id >= 0 then Some (String.sub s 0 id)
else aux xs in
aux suffixes *)
let rec unsafe_for_all_range s ~start ~finish p =
start > finish ||
p (String.unsafe_get s start) &&
unsafe_for_all_range s ~start:(start + 1) ~finish p
let for_all_from s start p =
let len = String.length s in
if start < 0 then invalid_arg "Ext_string.for_all_from"
else unsafe_for_all_range s ~start ~finish:(len - 1) p
let for_all s (p : char -> bool) =
unsafe_for_all_range s ~start:0 ~finish:(String.length s - 1) p
let is_empty s = String.length s = 0
let repeat n s =
let len = String.length s in
let res = Bytes.create(n * len) in
for i = 0 to pred n do
String.blit s 0 res (i * len) len
done;
Bytes.to_string res
let unsafe_is_sub ~sub i s j ~len =
let rec check k =
if k = len
then true
else
String.unsafe_get sub (i+k) =
String.unsafe_get s (j+k) && check (k+1)
in
j+len <= String.length s && check 0
let find ?(start=0) ~sub s =
let exception Local_exit in
let n = String.length sub in
let s_len = String.length s in
let i = ref start in
try
while !i + n <= s_len do
if unsafe_is_sub ~sub 0 s !i ~len:n then
raise_notrace Local_exit;
incr i
done;
-1
with Local_exit ->
!i
let contain_substring s sub =
find s ~sub >= 0
let non_overlap_count ~sub s =
let sub_len = String.length sub in
let rec aux acc off =
let i = find ~start:off ~sub s in
if i < 0 then acc
else aux (acc + 1) (i + sub_len) in
if String.length sub = 0 then invalid_arg "Ext_string.non_overlap_count"
else aux 0 0
let rfind ~sub s =
let exception Local_exit in
let n = String.length sub in
let i = ref (String.length s - n) in
try
while !i >= 0 do
if unsafe_is_sub ~sub 0 s !i ~len:n then
raise_notrace Local_exit;
decr i
done;
-1
with Local_exit ->
!i
let tail_from s x =
let len = String.length s in
if x > len then invalid_arg ("Ext_string.tail_from " ^s ^ " : "^ string_of_int x )
else String.sub s x (len - x)
let equal (x : string) y = x = y
let rec index_rec s lim i c =
if i > = lim then -1 else
if String.unsafe_get s i = c then i
else index_rec s lim ( i + 1 ) c
if i >= lim then -1 else
if String.unsafe_get s i = c then i
else index_rec s lim (i + 1) c *)
let rec index_rec_count s lim i c count =
if i >= lim then -1 else
if String.unsafe_get s i = c then
if count = 1 then i
else index_rec_count s lim (i + 1) c (count - 1)
else index_rec_count s lim (i + 1) c count
let index_count s i c count =
let lim = String.length s in
if i < 0 || i >= lim || count < 1 then
invalid_arg ("index_count: ( " ^string_of_int i ^ "," ^string_of_int count ^ ")" );
index_rec_count s lim i c count
let index_next s i c =
index_count s i c 1
index_count s i c 1 *)
let extract_until s cursor c =
let len = s in
let start = ! cursor in
if start < 0 || start > = len then (
cursor : = -1 ;
" "
)
else
let i = in
let finish =
if i < 0 then (
cursor : = -1 ;
len
)
else (
cursor : = i + 1 ;
i
) in
String.sub s start ( finish - start )
let len = String.length s in
let start = !cursor in
if start < 0 || start >= len then (
cursor := -1;
""
)
else
let i = index_rec s len start c in
let finish =
if i < 0 then (
cursor := -1 ;
len
)
else (
cursor := i + 1;
i
) in
String.sub s start (finish - start) *)
let rec rindex_rec s i c =
if i < 0 then i else
if String.unsafe_get s i = c then i else rindex_rec s (i - 1) c;;
let rec rindex_rec_opt s i c =
if i < 0 then None else
if String.unsafe_get s i = c then Some i else rindex_rec_opt s (i - 1) c;;
let rindex_neg s c =
rindex_rec s (String.length s - 1) c;;
let rindex_opt s c =
rindex_rec_opt s (String.length s - 1) c;;
let rec unsafe_no_char x ch i last_idx =
i > last_idx ||
(String.unsafe_get x i <> ch && unsafe_no_char x ch (i + 1) last_idx)
let rec unsafe_no_char_idx x ch i last_idx =
if i > last_idx then -1
else
if String.unsafe_get x i <> ch then
unsafe_no_char_idx x ch (i + 1) last_idx
else i
let no_char x ch i len : bool =
let str_len = String.length x in
if i < 0 || i >= str_len || len >= str_len then invalid_arg "Ext_string.no_char"
else unsafe_no_char x ch i len
let no_slash x =
unsafe_no_char x '/' 0 (String.length x - 1)
let no_slash_idx x =
unsafe_no_char_idx x '/' 0 (String.length x - 1)
let no_slash_idx_from x from =
let last_idx = String.length x - 1 in
assert (from >= 0);
unsafe_no_char_idx x '/' from last_idx
let replace_slash_backward (x : string ) =
let len = String.length x in
if unsafe_no_char x '/' 0 (len - 1) then x
else
String.map (function
| '/' -> '\\'
| x -> x ) x
let replace_backward_slash (x : string)=
let len = String.length x in
if unsafe_no_char x '\\' 0 (len -1) then x
else
String.map (function
|'\\'-> '/'
| x -> x) x
let empty = ""
#ifdef BROWSER
let compare = Bs_hash_stubs.string_length_based_compare
#else
external compare : string -> string -> int = "caml_string_length_based_compare" [@@noalloc];;
#endif
let single_space = " "
let single_colon = ":"
let concat_array sep (s : string array) =
let s_len = Array.length s in
match s_len with
| 0 -> empty
| 1 -> Array.unsafe_get s 0
| _ ->
let sep_len = String.length sep in
let len = ref 0 in
for i = 0 to s_len - 1 do
len := !len + String.length (Array.unsafe_get s i)
done;
let target =
Bytes.create
(!len + (s_len - 1) * sep_len ) in
let hd = (Array.unsafe_get s 0) in
let hd_len = String.length hd in
String.unsafe_blit hd 0 target 0 hd_len;
let current_offset = ref hd_len in
for i = 1 to s_len - 1 do
String.unsafe_blit sep 0 target !current_offset sep_len;
let cur = Array.unsafe_get s i in
let cur_len = String.length cur in
let new_off_set = (!current_offset + sep_len ) in
String.unsafe_blit cur 0 target new_off_set cur_len;
current_offset :=
new_off_set + cur_len ;
done;
Bytes.unsafe_to_string target
let concat3 a b c =
let a_len = String.length a in
let b_len = String.length b in
let c_len = String.length c in
let len = a_len + b_len + c_len in
let target = Bytes.create len in
String.unsafe_blit a 0 target 0 a_len ;
String.unsafe_blit b 0 target a_len b_len;
String.unsafe_blit c 0 target (a_len + b_len) c_len;
Bytes.unsafe_to_string target
let concat4 a b c d =
let a_len = String.length a in
let b_len = String.length b in
let c_len = String.length c in
let d_len = String.length d in
let len = a_len + b_len + c_len + d_len in
let target = Bytes.create len in
String.unsafe_blit a 0 target 0 a_len ;
String.unsafe_blit b 0 target a_len b_len;
String.unsafe_blit c 0 target (a_len + b_len) c_len;
String.unsafe_blit d 0 target (a_len + b_len + c_len) d_len;
Bytes.unsafe_to_string target
let concat5 a b c d e =
let a_len = String.length a in
let b_len = String.length b in
let c_len = String.length c in
let d_len = String.length d in
let e_len = String.length e in
let len = a_len + b_len + c_len + d_len + e_len in
let target = Bytes.create len in
String.unsafe_blit a 0 target 0 a_len ;
String.unsafe_blit b 0 target a_len b_len;
String.unsafe_blit c 0 target (a_len + b_len) c_len;
String.unsafe_blit d 0 target (a_len + b_len + c_len) d_len;
String.unsafe_blit e 0 target (a_len + b_len + c_len + d_len) e_len;
Bytes.unsafe_to_string target
let inter2 a b =
concat3 a single_space b
let inter3 a b c =
concat5 a single_space b single_space c
let inter4 a b c d =
concat_array single_space [| a; b ; c; d|]
let parent_dir_lit = ".."
let current_dir_lit = "."
reference { ! Bytes.unppercase }
let capitalize_ascii (s : string) : string =
if String.length s = 0 then s
else
begin
let c = String.unsafe_get s 0 in
if (c >= 'a' && c <= 'z')
|| (c >= '\224' && c <= '\246')
|| (c >= '\248' && c <= '\254') then
let uc = Char.unsafe_chr (Char.code c - 32) in
let bytes = Bytes.of_string s in
Bytes.unsafe_set bytes 0 uc;
Bytes.unsafe_to_string bytes
else s
end
let capitalize_sub (s : string) len : string =
let slen = String.length s in
if len < 0 || len > slen then invalid_arg "Ext_string.capitalize_sub"
else
if len = 0 then ""
else
let bytes = Bytes.create len in
let uc =
let c = String.unsafe_get s 0 in
if (c >= 'a' && c <= 'z')
|| (c >= '\224' && c <= '\246')
|| (c >= '\248' && c <= '\254') then
Char.unsafe_chr (Char.code c - 32) else c in
Bytes.unsafe_set bytes 0 uc;
for i = 1 to len - 1 do
Bytes.unsafe_set bytes i (String.unsafe_get s i)
done ;
Bytes.unsafe_to_string bytes
let uncapitalize_ascii =
String.uncapitalize_ascii
let lowercase_ascii = String.lowercase_ascii
external (.![]) : string -> int -> int = "%string_unsafe_get"
let get_int_1_unsafe (x : string) off : int =
x.![off]
let get_int_2_unsafe (x : string) off : int =
x.![off] lor
x.![off+1] lsl 8
let get_int_3_unsafe (x : string) off : int =
x.![off] lor
x.![off+1] lsl 8 lor
x.![off+2] lsl 16
let get_int_4_unsafe (x : string) off : int =
x.![off] lor
x.![off+1] lsl 8 lor
x.![off+2] lsl 16 lor
x.![off+3] lsl 24
let get_1_2_3_4 (x : string) ~off len : int =
if len = 1 then get_int_1_unsafe x off
else if len = 2 then get_int_2_unsafe x off
else if len = 3 then get_int_3_unsafe x off
else if len = 4 then get_int_4_unsafe x off
else assert false
let unsafe_sub x offs len =
let b = Bytes.create len in
Ext_bytes.unsafe_blit_string x offs b 0 len;
(Bytes.unsafe_to_string b)
let is_valid_hash_number (x:string) =
let len = String.length x in
len > 0 && (
let a = x.![0] in
a <= 57 &&
(if len > 1 then
a > 48 &&
for_all_from x 1 (function '0' .. '9' -> true | _ -> false)
else
a >= 48 )
)
let hash_number_as_i32_exn
( x : string) : int32 =
Int32.of_string x
let first_marshal_char (x : string) =
x <> "" &&
( String.unsafe_get x 0 = '\132')
|
d1a62496a0360d3a9bfbd1a2d453864843e8232c07020eccd4bb9e0b0c16038e | janestreet/async_unix | writer_intf.ml | open! Core
open! Import
module type Writer0 = sig
* [ Writer ] is Async 's main API for output to a file descriptor . It is the analog of
[ Core . Out_channel ] .
Each writer has an internal buffer , to which [ Writer.write * ] adds data . Each writer
uses an Async cooperative thread that makes [ write ( ) ] system calls to move the data
from the writer 's buffer to an OS buffer via the file descriptor .
There is no guarantee that the data sync on the other side of the writer can keep up
with the rate at which you are writing . If it can not , the OS buffer will fill up and
the writer 's cooperative thread will be unable to send any bytes . In that case , calls
to [ Writer.write * ] will grow the writer 's buffer without bound , as long as your
program produces data . One solution to this problem is to call [ Writer.flushed ] and
not continue until that becomes determined , which will only happen once the bytes in
the writer 's buffer have been successfully transferred to the OS buffer . Another
solution is to check [ Writer.bytes_to_write ] and not produce any more data if that is
beyond some bound .
There are two kinds of errors that one can handle with writers . First , a writer can be
[ close]d , which will cause future [ write]s ( and other operations ) to synchronously
raise an exception . Second , the writer 's cooperative thread can fail due to a
[ write ( ) ] system call failing . This will cause an exception to be sent to the writer 's
monitor , which will be a child of the monitor in effect when the writer is created .
One can deal with such asynchronous exceptions in the usual way , by handling the
stream returned by [ Monitor.detach_and_get_error_stream ( Writer.monitor writer ) ] .
[Core.Out_channel].
Each writer has an internal buffer, to which [Writer.write*] adds data. Each writer
uses an Async cooperative thread that makes [write()] system calls to move the data
from the writer's buffer to an OS buffer via the file descriptor.
There is no guarantee that the data sync on the other side of the writer can keep up
with the rate at which you are writing. If it cannot, the OS buffer will fill up and
the writer's cooperative thread will be unable to send any bytes. In that case, calls
to [Writer.write*] will grow the writer's buffer without bound, as long as your
program produces data. One solution to this problem is to call [Writer.flushed] and
not continue until that becomes determined, which will only happen once the bytes in
the writer's buffer have been successfully transferred to the OS buffer. Another
solution is to check [Writer.bytes_to_write] and not produce any more data if that is
beyond some bound.
There are two kinds of errors that one can handle with writers. First, a writer can be
[close]d, which will cause future [write]s (and other operations) to synchronously
raise an exception. Second, the writer's cooperative thread can fail due to a
[write()] system call failing. This will cause an exception to be sent to the writer's
monitor, which will be a child of the monitor in effect when the writer is created.
One can deal with such asynchronous exceptions in the usual way, by handling the
stream returned by [Monitor.detach_and_get_error_stream (Writer.monitor writer)]. *)
module Id : Unique_id
module Line_ending : sig
type t =
| Dos
| Unix
[@@deriving sexp_of]
end
type t [@@deriving sexp_of]
include Invariant.S with type t := t
* [ stdout ] and [ stderr ] are writers for file descriptors 1 and 2 . They are lazy because
we do n't want to create them in all programs that happen to link with Async .
When either [ stdout ] or [ stderr ] is created , they both are created . Furthermore , if
they point to the same inode , then they will be the same writer to [ Fd.stdout ] . This
can be confusing , because [ fd ( force stderr ) ] will be [ Fd.stdout ] , not [ Fd.stderr ] .
And subsequent modifications of [ Fd.stderr ] will have no effect on [ Writer.stderr ] .
Unfortunately , the sharing is necessary because Async uses OS threads to do [ write ( ) ]
syscalls using the writer buffer . When calling a program that redirects stdout and
stderr to the same file , as in :
{ v
foo.exe > /tmp / z.file 2>&1
v }
if [ Writer.stdout ] and [ Writer.stderr ] were n't the same writer , then they could have
threads simultaneously writing to the same file , which could easily cause data
loss .
we don't want to create them in all programs that happen to link with Async.
When either [stdout] or [stderr] is created, they both are created. Furthermore, if
they point to the same inode, then they will be the same writer to [Fd.stdout]. This
can be confusing, because [fd (force stderr)] will be [Fd.stdout], not [Fd.stderr].
And subsequent modifications of [Fd.stderr] will have no effect on [Writer.stderr].
Unfortunately, the sharing is necessary because Async uses OS threads to do [write()]
syscalls using the writer buffer. When calling a program that redirects stdout and
stderr to the same file, as in:
{v
foo.exe >/tmp/z.file 2>&1
v}
if [Writer.stdout] and [Writer.stderr] weren't the same writer, then they could have
threads simultaneously writing to the same file, which could easily cause data
loss. *)
val stdout : t Lazy.t
val stderr : t Lazy.t
type buffer_age_limit =
[ `At_most of Time.Span.t
| `Unlimited
]
[@@deriving bin_io, sexp]
(** [create ?buf_len ?syscall ?buffer_age_limit fd] creates a new writer.  The file
    descriptor [fd] should not be in use for writing by anything else.

    By default, a write system call occurs at the end of a cycle in which bytes were
    written.  One can supply [~syscall:(`Periodic span)] to get better performance.  This
    batches writes together, doing the write system call periodically according to the
    supplied span.

    A writer can asynchronously fail if the underlying write syscall returns an error,
    e.g., [EBADF], [EPIPE], [ECONNRESET], ....

    [buffer_age_limit] specifies how backed up you can get before raising an exception.
    The default is [`Unlimited] for files, and 2 minutes for other kinds of file
    descriptors.  You can supply [`Unlimited] to turn off buffer-age checks.

    [raise_when_consumer_leaves] specifies whether the writer should raise an exception
    when the consumer receiving bytes from the writer leaves, i.e., in Unix, the write
    syscall returns [EPIPE] or [ECONNRESET].  If [not raise_when_consumer_leaves], then
    the writer will silently drop all writes after the consumer leaves, and the writer
    will eventually fail with a writer-buffer-older-than error if the application remains
    open long enough.

    [line_ending] determines how [newline] and [write_line] terminate lines by default.
    If [line_ending = Unix] then end of line is ["\n"]; if [line_ending = Dos] then end of
    line is ["\r\n"].  Note that [line_ending = Dos] is not equivalent to opening the file
    in text mode because any "\n" characters being printed by other means (e.g., [write
    "\n"]) are still written verbatim (in Unix style).

    [time_source] is useful in tests to trigger [buffer_age_limit]-related conditions, or
    simply to have the result of (for example) [flushed_time_ns] agree with your test's
    synthetic time.  It is also used to schedule the [`Periodic] syscalls. *)
val create
: ?buf_len:int
-> ?syscall:[ `Per_cycle | `Periodic of Time.Span.t ]
-> ?buffer_age_limit:buffer_age_limit
-> ?raise_when_consumer_leaves:bool (** default is [true] *)
-> ?line_ending:Line_ending.t (** default is [Unix] *)
-> ?time_source:[> read ] Time_source.T1.t
(** default is [Time_source.wall_clock ()] *)
-> Fd.t
-> t
(** [raise_when_consumer_leaves t] returns the [raise_when_consumer_leaves] flag of [t]
    (see [create] for the flag's meaning). *)
val raise_when_consumer_leaves : t -> bool
(** [set_raise_when_consumer_leaves t bool] sets the [raise_when_consumer_leaves] flag of
    [t], which determines how [t] responds to a write system call raising [EPIPE] and
    [ECONNRESET] (see [create]). *)
val set_raise_when_consumer_leaves : t -> bool -> unit
(** [set_buffer_age_limit t buffer_age_limit] replaces the existing buffer age limit with
the new one. This is useful for stdout and stderr, which are lazily created in a
context that does not allow applications to specify [buffer_age_limit]. *)
val set_buffer_age_limit : t -> buffer_age_limit -> unit
(** [consumer_left t] returns a deferred that becomes determined when [t] attempts to
write to a pipe that broke because the consumer on the other side left. *)
val consumer_left : t -> unit Deferred.t
(** [of_out_channel oc kind] creates a writer for the given [Out_channel.t].
    NOTE(review): presumably [kind] describes the channel's underlying file
    descriptor -- confirm against the implementation. *)
val of_out_channel : Out_channel.t -> Fd.Kind.t -> t
(** [open_file file] opens [file] for writing and returns a writer for it. It uses
[Unix_syscalls.openfile] to open the file. *)
val open_file
: ?info:Info.t (** for errors. Defaults to the file path. *)
-> ?append:bool (** default is [false], meaning truncate instead *)
-> ?buf_len:int
-> ?syscall:[ `Per_cycle | `Periodic of Time.Span.t ]
    -> ?perm:Unix.file_perm (** default is [0o666] *)
-> ?line_ending:Line_ending.t (** default is [Unix] *)
-> ?time_source:[> read ] Time_source.T1.t
(** default is [Time_source.wall_clock ()] *)
-> string
-> t Deferred.t
(** [with_file ~file f] opens [file] for writing, creates a writer [t], and runs [f t] to
    obtain a deferred [d].  When [d] becomes determined, the writer is closed.  When the
    close completes, the result of [with_file] becomes determined with the value of [d].

    There is no need to call [Writer.flushed] to ensure that [with_file] waits for the
    writer to be flushed before closing it.  [Writer.close] will already wait for the
    flush.

    [exclusive = true] uses a filesystem lock to try and make sure that the file is not
    modified during a concurrent read or write operation.  This is an advisory lock, which
    means that the reader must be cooperating by taking a relevant lock when writing (see
    [Reader.with_file]).  This is unrelated and should not be confused with the [O_EXCL]
    flag in [open] systemcall.  Note that the implementation uses [Unix.lockf], which has
    known pitfalls.  It's recommended that you avoid the [exclusive] flag in favor of
    using a library dedicated to dealing with file locks where the pitfalls can be
    documented in detail. *)
val with_file
  : ?perm:Unix.file_perm (** default is [0o666] *)
-> ?append:bool (** default is [false], meaning truncate instead *)
-> ?syscall:[ `Per_cycle | `Periodic of Time.Span.t ]
-> ?exclusive:bool (** default is [false] *)
-> ?line_ending:Line_ending.t (** default is [Unix] *)
-> ?time_source:[> read ] Time_source.T1.t
(** default is [Time_source.wall_clock ()] *)
-> string
-> f:(t -> 'a Deferred.t)
-> 'a Deferred.t
(** [id] returns an id for this writer that is unique among all other writers. *)
val id : t -> Id.t
(** [fd] returns the [Fd.t] used to create this writer. *)
val fd : t -> Fd.t
(** [set_fd t fd] sets the [fd] used by [t] for its underlying system calls.  It first
    waits until everything being sent to the current [fd] is flushed.  Of course, one must
    understand how the writer works and what one is doing to use this. *)
val set_fd : t -> Fd.t -> unit Deferred.t
(** [write_gen t a] writes [a] to writer [t], with [length] specifying the number of bytes
    needed and [blit_to_bigstring] blitting [a] directly into the [t]'s buffer.  If one
    has a type that has [length] and [blit_to_bigstring] functions, like:

    {[
      module A : sig
        type t
        val length : t -> int
        val blit_to_bigstring : (t, Bigstring.t) Blit.blit
      end ]}

    then one can use [write_gen] to implement a custom analog of [Writer.write], like:

    {[
      module Write_a : sig
        val write : ?pos:int -> ?len:int -> A.t -> Writer.t -> unit
      end = struct
        let write ?pos ?len a writer =
          Writer.write_gen
            ~length:A.length
            ~blit_to_bigstring:A.blit_to_bigstring
            ?pos ?len writer a
      end ]}

    In some cases it may be difficult to write only part of a value:

    {[
      module B : sig
        type t
        val length : t -> int
        val blit_to_bigstring : t -> Bigstring.t -> pos:int -> unit
      end ]}

    In these cases, use [write_gen_whole] instead.  It never requires writing only part of
    a value, although it is potentially less space-efficient.  It may waste portions of
    previously-allocated write buffers if they are too small.

    {[
      module Write_b : sig
        val write : B.t -> Writer.t -> unit
      end = struct
        let write b writer =
          Writer.write_gen_whole
            ~length:B.length
            ~blit_to_bigstring:B.blit_to_bigstring
            writer b
      end ]}

    Note: [write_gen] and [write_gen_whole] give you access to the writer's internal
    buffer.  You should not capture it; doing so might lead to errors of the segfault
    kind. *)
val write_gen
: ?pos:int
-> ?len:int
-> t
-> 'a
-> blit_to_bigstring:
(src:'a -> src_pos:int -> dst:Bigstring.t -> dst_pos:int -> len:int -> unit)
-> length:('a -> int)
-> unit
val write_gen_whole
: t
-> 'a
-> blit_to_bigstring:('a -> Bigstring.t -> pos:int -> unit)
-> length:('a -> int)
-> unit
(** [write_direct t ~f] gives [t]'s internal buffer to [f]. [pos] and [len] define the
portion of the buffer that can be filled. [f] must return a pair [(x, written)] where
[written] is the number of bytes written to the buffer at [pos]. [write_direct]
raises if [written < 0 || written > len]. [write_direct] returns [Some x], or [None]
if the writer is stopped. By using [write_direct] only, one can ensure that the
writer's internal buffer never grows. Look at the [write_direct] expect tests for an
example of how this can be used to construct a [write_string] like function that never
grows the internal buffer. *)
val write_direct : t -> f:(Bigstring.t -> pos:int -> len:int -> 'a * int) -> 'a option
(** [write ?pos ?len t s] adds a job to the writer's queue of pending writes. The
contents of the string are copied to an internal buffer before [write] returns, so
clients can do whatever they want with [s] after that. *)
val write_bytes : ?pos:int -> ?len:int -> t -> Bytes.t -> unit
val write : ?pos:int -> ?len:int -> t -> string -> unit
val write_bigstring : ?pos:int -> ?len:int -> t -> Bigstring.t -> unit
val write_iobuf : ?pos:int -> ?len:int -> t -> ([> read ], _) Iobuf.t -> unit
val write_substring : t -> Substring.t -> unit
val write_bigsubstring : t -> Bigsubstring.t -> unit
(** [writef t fmt ...] formats the arguments according to [fmt] and writes the resulting
    string to [t] (printf-style; the [format4] string result is what gets written). *)
val writef : t -> ('a, unit, string, unit) format4 -> 'a
(** [to_formatter] returns an OCaml-formatter that one can print to using
{!Format.fprintf}. Note that flushing the formatter will only submit all buffered
data to the writer, but does {e not} guarantee flushing to the operating system. *)
val to_formatter : t -> Format.formatter
(** [write_char t c] writes the character. *)
val write_char : t -> char -> unit
(** [newline t] writes the end-of-line terminator. [line_ending] can override [t]'s
[line_ending]. *)
val newline : ?line_ending:Line_ending.t -> t -> unit
(** [write_line t s ?line_ending] is [write t s; newline t ?line_ending]. *)
val write_line : ?line_ending:Line_ending.t -> t -> string -> unit
(** [write_byte t i] writes one 8-bit integer (as the single character with that code).
    The given integer is taken modulo 256. *)
val write_byte : t -> int -> unit
module Terminate_with : sig
type t =
| Newline
| Space_if_needed
[@@deriving sexp_of]
end
(** [write_sexp t sexp] writes to [t] the string representation of [sexp], possibly
followed by a terminating character as per [Terminate_with]. With
[~terminate_with:Newline], the terminating character is a newline. With
[~terminate_with:Space_if_needed], if a space is needed to ensure that the sexp reader
knows that it has reached the end of the sexp, then the terminating character will be
a space; otherwise, no terminating character is added. A terminating space is needed
if the string representation doesn't end in [')'] or ['"']. *)
val write_sexp
: ?hum:bool (** default is [false] *)
-> ?terminate_with:Terminate_with.t (** default is [Space_if_needed] *)
-> t
-> Sexp.t
-> unit
(** [write_bin_prot] writes out a value using its bin_prot sizer/writer pair.  The format
    is the "size-prefixed binary protocol", in which the length of the data is written
    before the data itself.  This is the format that [Reader.read_bin_prot] reads. *)
val write_bin_prot : t -> 'a Bin_prot.Type_class.writer -> 'a -> unit
(** Writes out a value using its bin_prot writer.  Unlike [write_bin_prot], this doesn't
    prefix the output with the size of the bin_prot blob.  [size] is the expected size.
    This function will raise if the bin_prot writer writes an amount other than [size]
    bytes. *)
val write_bin_prot_no_size_header
: t
-> size:int
-> 'a Bin_prot.Write.writer
-> 'a
-> unit
(** Unlike the [write_] functions, all functions starting with [schedule_] require
    flushing or closing of the writer after returning before it is safe to modify the
    bigstrings which were directly or indirectly passed to these functions.  The reason is
    that these bigstrings will be read from directly when writing; their contents is not
    copied to internal buffers.

    This is important if users need to send the same large data string to a huge number of
    clients simultaneously (e.g., on a cluster), because these functions then avoid
    needlessly exhausting memory by sharing the data. *)
(** [schedule_bigstring t bstr] schedules a write of bigstring [bstr].  It is not safe to
    change the bigstring until the writer has been successfully flushed or closed after
    this operation. *)
val schedule_bigstring : t -> ?pos:int -> ?len:int -> Bigstring.t -> unit
val schedule_bigsubstring : t -> Bigsubstring.t -> unit
(** [schedule_iobuf_peek] is like [schedule_bigstring], but for an iobuf. It is not safe
to change the iobuf until the writer has been successfully flushed or closed after
this operation. *)
val schedule_iobuf_peek : t -> ?pos:int -> ?len:int -> ([> read ], _) Iobuf.t -> unit
(** [schedule_iobuf_consume] is like [schedule_iobuf_peek]. Once the result is determined,
the iobuf will be fully consumed (or advanced by [min len (Iobuf.length iobuf)] if
[len] is specified), and the writer will be flushed. *)
val schedule_iobuf_consume
: t
-> ?len:int
-> ([> read ], Iobuf.seek) Iobuf.t
-> unit Deferred.t
module Destroy_or_keep : sig
type t =
| Destroy
| Keep
[@@deriving sexp_of]
end
(** [schedule_iovec t iovec] schedules a write of I/O-vector [iovec].  It is not safe to
    change the bigstrings underlying the I/O-vector until the writer has been successfully
    flushed or closed after this operation. *)
val schedule_iovec
: ?destroy_or_keep:Destroy_or_keep.t (** default is [Keep] *)
-> t
-> Bigstring.t Unix.IOVec.t
-> unit
(** [schedule_iovecs t iovecs] like {!schedule_iovec}, but takes a whole queue [iovecs] of
    I/O-vectors as argument.  The queue is guaranteed to be empty when this function
    returns and can be modified.  It is not safe to change the bigstrings underlying the
    I/O-vectors until the writer has been successfully flushed or closed after this
    operation. *)
val schedule_iovecs : t -> Bigstring.t Unix.IOVec.t Queue.t -> unit
module Flush_result : sig
type t =
| Error
(** [Error] is accompanied by a detailed error being sent to the writer's monitor. *)
| Consumer_left
(** [Consumer_left] is returned when the consumer leaves (see {!consumer_left}) and
    {!raise_when_consumer_leaves} is set to [false].  If that flag is set to [true],
    then you get an [Error] instead. *)
| Flushed of Time_ns.t
(** The time just after the [write()] system call returned or
the time [flushed_*] was called if all the writes were already flushed by then. *)
[@@deriving sexp_of]
end
(** [flushed_or_failed_with_result t] returns a deferred that will become determined when
    all prior writes complete (i.e. the [write()] system call returns), or when any of
    them fail.

    Handling the [Error] case can be tricky due to the following race: the result gets
    determined concurrently with the exception propagation through the writer's monitor.
    The caller needs to make sure that the program behavior does not depend on which
    signal propagates first. *)
val flushed_or_failed_with_result : t -> Flush_result.t Deferred.t
(** [flushed_or_failed_unit t] returns a deferred that will become
determined when all prior writes complete, or when any of them fail.
Unlike {!flushed_or_failed_with_result}, its return value gives you no indication of
which happened. In the [Error] case, the result will be determined in parallel with
the error propagating to the writer's monitor. The caller should robustly handle
either side winning that race.
*)
val flushed_or_failed_unit : t -> unit Deferred.t
(** [flushed t] returns a deferred that will become determined when all prior writes
complete (i.e. the [write()] system call returns). If a prior write fails, then the
deferred will never become determined.
It is OK to call [flushed t] after [t] has been closed. *)
val flushed : t -> unit Deferred.t
(** [flushed_time t] and [flushed_time_ns t] are like [flushed t], but additionally carry
    a flush time -- presumably the time just after the [write()] system call returned, as
    described for [Flush_result.Flushed]; confirm against the implementation. *)
val flushed_time : t -> Time.t Deferred.t
val flushed_time_ns : t -> Time_ns.t Deferred.t
(** [fsync t] calls [flushed t] before calling [Unix.fsync] on the underlying
file descriptor *)
val fsync : t -> unit Deferred.t
(** [fdatasync t] calls [flushed t] before calling [Unix.fdatasync] on the
underlying file descriptor *)
val fdatasync : t -> unit Deferred.t
(** [send] writes a string to the writer that can be read back using [Reader.recv]. *)
val send : t -> string -> unit
(** [monitor t] returns the writer's monitor. *)
val monitor : t -> Monitor.t
(** [close ?force_close t] waits for the writer to be flushed, and then calls [Unix.close]
    on the underlying file descriptor.  [force_close] causes the [Unix.close] to happen
    even if the flush hangs.  By default [force_close] is [Deferred.never ()] for files
    and [after (sec 5)] for other types of file descriptors (e.g., sockets).  If the close
    is forced, data in the writer's buffer may not be written to the file descriptor.  You
    can check this by calling [bytes_to_write] after [close] finishes.

    WARNING: [force_close] will not reliably stop any write that is in progress.
    If there are any in-flight system calls, it will wait for them to finish, which
    includes [writev], which can legitimately block forever.

    [close] will raise an exception if the [Unix.close] on the underlying file descriptor
    fails.

    You must call [close] on a writer in order to close the underlying file descriptor.
    Not doing so will cause a file descriptor leak.  It also will cause a space leak,
    because until the writer is closed, it is held on to in order to flush the writer on
    shutdown.

    It is an error to call other operations on [t] after [close t] has been called, except
    that calls of [close] subsequent to the original call to [close] will return the same
    deferred as the original call.

    [close_started t] becomes determined as soon as [close] is called.

    [close_finished t] becomes determined after [t]'s underlying file descriptor has been
    closed, i.e., it is the same as the result of [close].  [close_finished] differs from
    [close] in that it does not have the side effect of initiating a close.

    [is_closed t] returns [true] iff [close t] has been called.

    [is_open t] is [not (is_closed t)]

    [with_close t ~f] runs [f ()], and closes [t] after [f] finishes or raises. *)
val close : ?force_close:unit Deferred.t -> t -> unit Deferred.t
val close_started : t -> unit Deferred.t
val close_finished : t -> unit Deferred.t
val is_closed : t -> bool
val is_open : t -> bool
val with_close : t -> f:(unit -> 'a Deferred.t) -> 'a Deferred.t
(** [can_write t] returns [true] if calls to [write*] functions on [t] are allowed. If
[is_open t] then [can_write t]. But one can have [is_closed t] and [can_write t],
during the time after [close t] before closing has finished. *)
val can_write : t -> bool
(** Errors raised within the writer can stop the background job that flushes out the
writer's buffers. [is_stopped_permanently] returns [true] when the background job has
stopped. [stopped_permanently] becomes determined when the background job has
stopped. *)
val is_stopped_permanently : t -> bool
val stopped_permanently : t -> unit Deferred.t
(** In addition to flushing its internal buffer prior to closing, a writer keeps track of
producers that are feeding it data, so that when [Writer.close] is called, it does the
following:
+ requests that the writer's producers flush their data to it
+ flushes the writer's internal buffer
+ calls [Unix.close] on the writer's underlying file descriptor
[with_flushed_at_close t ~flushed ~f] calls [f] and adds [flushed] to the set of
producers that should be flushed-at-close, for the duration of [f]. *)
val with_flushed_at_close
: t
-> flushed:(unit -> unit Deferred.t)
-> f:(unit -> 'a Deferred.t)
-> 'a Deferred.t
(** [bytes_to_write t] returns how many bytes have been requested to write but have not
yet been written. *)
val bytes_to_write : t -> int
(** [bytes_written t] returns how many bytes have been written. *)
val bytes_written : t -> Int63.t
(** [bytes_received t] returns how many bytes have been received by the writer. As long
as the writer is running, [bytes_received = bytes_written + bytes_to_write]. *)
val bytes_received : t -> Int63.t
(** [with_file_atomic ?temp_file ?perm ?fsync ?replace_special file ~f] creates a writer
    to a temp file, feeds that writer to [f], and when the result of [f] becomes
    determined, atomically moves (using [Unix.rename]) the temp file to [file].  If [file]
    currently exists and is a regular file (see below regarding [replace_special]) it will
    be replaced, even if it is read-only.

    The temp file will be [file] (or [temp_file] if supplied) suffixed by a unique random
    sequence of six characters.  The temp file will be removed if an exception is raised
    to the monitor of [f] before the result of [f] becomes determined.  However, if the
    program exits for some other reason, the temp file may not be cleaned up; so it may be
    prudent to choose a temp file that can be easily found by cleanup tools.

    If [fsync] is [true], the temp file will be flushed to disk before it takes the place
    of the target file, thus guaranteeing that the target file will always be in a sound
    state, even after a machine crash.  Since synchronization is extremely slow, this is
    not the default.  Think carefully about the event of machine crashes and whether you
    may need this option!

    If [replace_special] is [false] (the default) an existing special [file] (block or
    character device, socket or FIFO) will not be replaced by a regular file, the
    temporary file is not created and an exception is raised.  To explicitly replace an
    existing special [file], [replace_special] must be passed as [true].  Note that
    if [file] exists and is a directory, the rename will fail; if [file] exists and is
    a symbolic link, the link will be replaced, not the target (as per [Unix.rename]).

    We intend for [with_file_atomic] to mimic the behavior of the [open] system call, so
    if [file] does not exist, we will apply the current umask to [perm] (the effective
    permissions become [perm land lnot umask], see [man 2 open]).  However, if [file] does
    exist and [perm] is specified, we do something different from [open] system call: we
    override the permission with [perm], ignoring the umask.  This means that if you
    create and then immediately overwrite the file with [with_file_atomic ~perm], then the
    umask will be honored the first time and ignored the second time.  If [perm] is not
    specified, then any existing file permissions are preserved.

    If [f] closes the writer passed to it, [with_file_atomic] raises and does not create
    [file]. *)
val with_file_atomic
  :  ?temp_file:string (** defaults to [file] suffixed by six random characters *)
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
  -> ?time_source:[> read ] Time_source.T1.t
       (** default is [Time_source.wall_clock ()] *)
  -> string (** [file], the destination path *)
  -> f:(t -> 'a Deferred.t)
  -> 'a Deferred.t
(** [save] is a special case of [with_file_atomic] that atomically writes the given
string to the specified file. *)
val save
  :  ?temp_file:string
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
  -> string (** [file], the destination path *)
  -> contents:string
  -> unit Deferred.t
(** [save_lines file lines] writes all lines in [lines] to [file], with each line followed
by a newline. *)
val save_lines
  :  ?temp_file:string
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
  -> string (** [file], the destination path *)
  -> string list (** [lines]; each is written followed by a newline *)
  -> unit Deferred.t
(** [save_sexp] is a special case of [with_file_atomic] that atomically writes the
given sexp to the specified file.
[save_sexp t sexp] writes [sexp] to [t], followed by a newline. To read a file
produced using [save_sexp], one would typically use [Reader.load_sexp], which deals
with the additional whitespace and works nicely with converting the sexp to a
value. *)
val save_sexp
  :  ?temp_file:string
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
  -> ?hum:bool (** default is [true] *)
  -> string (** [file], the destination path *)
  -> Sexp.t
  -> unit Deferred.t
(** [save_sexps] works similarly to [save_sexp], but saves a sequence of sexps instead,
    separated by newlines. There is a corresponding [Reader.load_sexps] for reading back
    in. *)
val save_sexps
  :  ?temp_file:string
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
  -> ?hum:bool (** default is [true] *)
  -> string (** [file], the destination path *)
  -> Sexp.t list
  -> unit Deferred.t
(** [save_sexps_conv] is like [save_sexps], but converts to sexps internally, one at a
time. This avoids allocating the list of sexps up front, which can be costly. The
default values of the parameters are the same as [save_sexps]. *)
val save_sexps_conv
  :  ?temp_file:string
     (* [Unix.file_perm] (an alias of [int]) for consistency with the other [save_*]
        functions; the type is unchanged. *)
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
     (* Per the doc comment above, defaults match [save_sexps], so [hum] defaults to
        [true]. *)
  -> ?hum:bool (** default is [true] *)
  -> string (** [file], the destination path *)
  -> 'a list
  -> ('a -> Sexp.t)
  -> unit Deferred.t
(** [save_bin_prot t bin_writer 'a] is a special case of [with_file_atomic] that writes
['a] to [t] using its bin_writer, in the
size-prefixed format, like [write_bin_prot]. To read a file produced using
[save_bin_prot], one would typically use [Reader.load_bin_prot]. *)
val save_bin_prot
  :  ?temp_file:string
  -> ?perm:Unix.file_perm
  -> ?fsync:bool (** default is [false] *)
  -> ?replace_special:bool (** default is [false] *)
  -> string (** [file], the destination path *)
  -> 'a Bin_prot.Type_class.writer
  -> 'a
  -> unit Deferred.t
(** [transfer' t pipe_r f] repeatedly reads values from [pipe_r] and feeds them to [f],
    which should in turn write them to [t]. It provides pushback to [pipe_r] by not
    reading when [t] cannot keep up with the data being pushed in.

    By default, each read from [pipe_r] reads all the values in [pipe_r]. One can supply
    [max_num_values_per_read] to limit the number of values per read.

    The [transfer'] stops and the result becomes determined when [stop] becomes
    determined, when [pipe_r] reaches its EOF, when [t] is closed, or when [t]'s consumer
    leaves. In the latter two cases, [transfer'] closes [pipe_r].

    [transfer'] causes [Pipe.flushed] on [pipe_r]'s writer to ensure that the bytes have
    been flushed to [t] before returning. It also waits on [Pipe.upstream_flushed] at
    shutdown.

    [transfer t pipe_r f] is equivalent to:

    {[
      transfer' t pipe_r (fun q -> Queue.iter q ~f; return ()) ]} *)
val transfer'
  :  ?stop:unit Deferred.t
  -> ?max_num_values_per_read:int
     (** default: each read takes all values currently available in the pipe *)
  -> t
  -> 'a Pipe.Reader.t
  -> ('a Queue.t -> unit Deferred.t)
  -> unit Deferred.t
(* [transfer] is the per-value variant of [transfer']; see the doc comment above for
   stopping and flushing semantics. *)
val transfer
  :  ?stop:unit Deferred.t
  -> ?max_num_values_per_read:int
     (** default: each read takes all values currently available in the pipe *)
  -> t
  -> 'a Pipe.Reader.t
  -> ('a -> unit)
  -> unit Deferred.t
(** [pipe t] returns the writing end of a pipe attached to [t] that pushes back when [t]
cannot keep up with the data being pushed in. Closing the pipe does not close [t]. *)
val pipe : t -> string Pipe.Writer.t
(** [behave_nicely_in_pipeline ~writers ()] causes the program to exit with status 141
    (indicating SIGPIPE) if any of the consumers of [writers] go away. It also sets the
    buffer age to unlimited, in case there is a human (e.g., using [less]) on the other
    side of the pipeline.

    This can be called at the toplevel of a program, before [Command.run] for instance.
    (this function doesn't start the async scheduler). *)
(* Safe to call at program toplevel; per the doc comment above, it does not start the
   Async scheduler. *)
val behave_nicely_in_pipeline
  :  ?writers:t list (** defaults to [stdout; stderr] *)
  -> unit
  -> unit
(** [set_synchronous_out_channel t out_channel] waits until [bytes_to_write t = 0], and
    then mutates [t] so that all future writes to [t] synchronously call
    [Out_channel.output*] functions to send data to the OS immediately.

    [set_synchronous_out_channel] is used by expect tests to ensure that the interleaving
    between calls to [Core.printf] (and similar IO functions) and [Async.printf] generates
    output with the same interleaving. [set_synchronous_out_channel] is idempotent. *)
val set_synchronous_out_channel : t -> Out_channel.t -> unit Deferred.t
(** [using_synchronous_backing_out_channel t = true] if writes to [t] are being done
synchronously, e.g., due to [set_synchronous_out_channel],
[set_synchronous_backing_out_channel], [use_synchronous_stdout_and_stderr]. *)
val using_synchronous_backing_out_channel : t -> bool
(** [clear_synchronous_out_channel t] restores [t] to its normal state, with the
background writer asynchronously feeding data to the OS.
[clear_synchronous_out_channel] is idempotent. *)
val clear_synchronous_out_channel : t -> unit
(** [with_synchronous_out_channel t out_channel ~f] presumably puts [t] into the
    synchronous mode of [set_synchronous_out_channel] for the duration of [f], restoring
    the normal background-writer mode afterwards -- NOTE(review): not documented here;
    confirm against the implementation. *)
val with_synchronous_out_channel
  :  t
  -> Out_channel.t
  -> f:(unit -> 'a Deferred.t)
  -> 'a Deferred.t
(** [use_synchronous_stdout_and_stderr ()] causes all subsequent writes to
    stdout and stderr to occur synchronously (after any pending writes have
    flushed).

    This ensures [printf]-family writes happen immediately, which avoids two
    common sources of confusion:

    {ul
     {li unexpected interleaving of [Core.printf] and [Async.printf] calls; and}
     {li [Async.printf] calls that don't get flushed before an application exits}}

    The disadvantages are:

    {ul
     {li this makes writes blocking, which can delay unrelated asynchronous jobs until
         the consumer stops pushing back; and}
     {li the errors raised by write are different and it won't respect
         {!behave_nicely_in_pipeline} anymore}} *)
val use_synchronous_stdout_and_stderr : unit -> unit Deferred.t
(** [Backing_out_channel] generalizes [Out_channel] to a narrow interface that can be used
to collect strings, etc. *)
module Backing_out_channel : sig
  type t [@@deriving sexp_of]

  (** [create] builds a [t] from callbacks for emitting a single character, emitting a
      buffer of characters, flushing, and rendering a sexp (used by [sexp_of_t]). *)
  val create
    :  output_char:(char -> unit)
    -> output_chars:(bigstring -> len:int -> unit)
    -> flush:(unit -> unit)
    -> sexp:(unit -> Sexp.t)
    -> t

  val of_out_channel : Out_channel.t -> t

  (* Presumably builds a [t] that delivers output one character at a time via the given
     function -- NOTE(review): confirm against the implementation. *)
  val of_output_char : (char -> unit) -> t
end
val set_synchronous_backing_out_channel : t -> Backing_out_channel.t -> unit Deferred.t
(* Presumably the [Backing_out_channel] analog of [with_synchronous_out_channel]:
   synchronous output to the given channel for the duration of [f] -- NOTE(review):
   confirm against the implementation. *)
val with_synchronous_backing_out_channel
  :  t
  -> Backing_out_channel.t
  -> f:(unit -> 'a Deferred.t)
  -> 'a Deferred.t
(**/**)
module Private : sig
  (* Internal and test-only hooks; hidden from generated docs by the (**/**) marker
     above. Not for use outside this library. *)
  module Check_buffer_age : sig
    module Internal_for_unit_test : sig
      val check_now : check_invariants:bool -> time_source:Time_source.t -> unit
      val num_active_checks_for : Time_source.t -> int option
    end
  end

  (* Directly overwrite the writer's byte counters (test-only). *)
  val set_bytes_received : t -> Int63.t -> unit
  val set_bytes_written : t -> Int63.t -> unit
end
end
(** The full writer interface: everything in [Writer0] plus [of_pipe]. *)
module type Writer = sig
  include Writer0

  (** [of_pipe info pipe_w] returns a writer [t] such that data written to [t] will appear
      on [pipe_w]. If either [t] or [pipe_w] are closed, the other is closed as well.

      [of_pipe] is implemented by attaching [t] to the write-end of a Unix pipe, and
      shuttling bytes from the read-end of the Unix pipe to [pipe_w]. *)
  val of_pipe
    :  ?time_source:[> read ] Time_source.T1.t
         (** default is [Time_source.wall_clock ()] *)
    -> Info.t
    -> string Pipe.Writer.t
    -> (t * [ `Closed_and_flushed_downstream of unit Deferred.t ]) Deferred.t
end
| null | https://raw.githubusercontent.com/janestreet/async_unix/e5d9e9d388a23237cec3bf42d7e310c459de4309/src/writer_intf.ml | ocaml | * default is [true]
* default is [Unix]
* default is [Time_source.wall_clock ()]
* [set_buffer_age_limit t buffer_age_limit] replaces the existing buffer age limit with
the new one. This is useful for stdout and stderr, which are lazily created in a
context that does not allow applications to specify [buffer_age_limit].
* [consumer_left t] returns a deferred that becomes determined when [t] attempts to
write to a pipe that broke because the consumer on the other side left.
* [open_file file] opens [file] for writing and returns a writer for it. It uses
[Unix_syscalls.openfile] to open the file.
* for errors. Defaults to the file path.
* default is [false], meaning truncate instead
* default is [Unix]
* default is [Time_source.wall_clock ()]
* default is [false], meaning truncate instead
* default is [false]
* default is [Unix]
* default is [Time_source.wall_clock ()]
* [id] returns an id for this writer that is unique among all other writers.
* [fd] returns the [Fd.t] used to create this writer.
* [write_direct t ~f] gives [t]'s internal buffer to [f]. [pos] and [len] define the
portion of the buffer that can be filled. [f] must return a pair [(x, written)] where
[written] is the number of bytes written to the buffer at [pos]. [write_direct]
raises if [written < 0 || written > len]. [write_direct] returns [Some x], or [None]
if the writer is stopped. By using [write_direct] only, one can ensure that the
writer's internal buffer never grows. Look at the [write_direct] expect tests for an
example of how this can be used to construct a [write_string] like function that never
grows the internal buffer.
* [write ?pos ?len t s] adds a job to the writer's queue of pending writes. The
contents of the string are copied to an internal buffer before [write] returns, so
clients can do whatever they want with [s] after that.
* [to_formatter] returns an OCaml-formatter that one can print to using
{!Format.fprintf}. Note that flushing the formatter will only submit all buffered
data to the writer, but does {e not} guarantee flushing to the operating system.
* [write_char t c] writes the character.
* [newline t] writes the end-of-line terminator. [line_ending] can override [t]'s
[line_ending].
* [write_line t s ?line_ending] is [write t s; newline t ?line_ending].
* [write_sexp t sexp] writes to [t] the string representation of [sexp], possibly
followed by a terminating character as per [Terminate_with]. With
[~terminate_with:Newline], the terminating character is a newline. With
[~terminate_with:Space_if_needed], if a space is needed to ensure that the sexp reader
knows that it has reached the end of the sexp, then the terminating character will be
a space; otherwise, no terminating character is added. A terminating space is needed
if the string representation doesn't end in [')'] or ['"'].
* default is [false]
* default is [Space_if_needed]
* [schedule_iobuf_peek] is like [schedule_bigstring], but for an iobuf. It is not safe
to change the iobuf until the writer has been successfully flushed or closed after
this operation.
* [schedule_iobuf_consume] is like [schedule_iobuf_peek]. Once the result is determined,
the iobuf will be fully consumed (or advanced by [min len (Iobuf.length iobuf)] if
[len] is specified), and the writer will be flushed.
* default is [Keep]
* [Error] is accompanied by a detailed error being sent to the writer's monitor.
* The time just after the [write()] system call returned or
the time [flushed_*] was called if all the writes were already flushed by then.
* [flushed_or_failed_unit t] returns a deferred that will become
determined when all prior writes complete, or when any of them fail.
Unlike {!flushed_or_failed_with_result}, its return value gives you no indication of
which happened. In the [Error] case, the result will be determined in parallel with
the error propagating to the writer's monitor. The caller should robustly handle
either side winning that race.
* [flushed t] returns a deferred that will become determined when all prior writes
complete (i.e. the [write()] system call returns). If a prior write fails, then the
deferred will never become determined.
It is OK to call [flushed t] after [t] has been closed.
* [fsync t] calls [flushed t] before calling [Unix.fsync] on the underlying
file descriptor
* [fdatasync t] calls [flushed t] before calling [Unix.fdatasync] on the
underlying file descriptor
* [send] writes a string to the writer that can be read back using [Reader.recv].
* [monitor t] returns the writer's monitor.
* [can_write t] returns [true] if calls to [write*] functions on [t] are allowed. If
[is_open t] then [can_write t]. But one can have [is_closed t] and [can_write t],
during the time after [close t] before closing has finished.
* Errors raised within the writer can stop the background job that flushes out the
writer's buffers. [is_stopped_permanently] returns [true] when the background job has
stopped. [stopped_permanently] becomes determined when the background job has
stopped.
* In addition to flushing its internal buffer prior to closing, a writer keeps track of
producers that are feeding it data, so that when [Writer.close] is called, it does the
following:
+ requests that the writer's producers flush their data to it
+ flushes the writer's internal buffer
+ calls [Unix.close] on the writer's underlying file descriptor
[with_flushed_at_close t ~flushed ~f] calls [f] and adds [flushed] to the set of
producers that should be flushed-at-close, for the duration of [f].
* [bytes_to_write t] returns how many bytes have been requested to write but have not
yet been written.
* [bytes_written t] returns how many bytes have been written.
* [bytes_received t] returns how many bytes have been received by the writer. As long
as the writer is running, [bytes_received = bytes_written + bytes_to_write].
* default is [false]
* default is [false]
* default is [Time_source.wall_clock ()]
* [save] is a special case of [with_file_atomic] that atomically writes the given
string to the specified file.
* default is [false]
* default is [false]
* [save_lines file lines] writes all lines in [lines] to [file], with each line followed
by a newline.
* default is [false]
* default is [false]
* [save_sexp] is a special case of [with_file_atomic] that atomically writes the
given sexp to the specified file.
[save_sexp t sexp] writes [sexp] to [t], followed by a newline. To read a file
produced using [save_sexp], one would typically use [Reader.load_sexp], which deals
with the additional whitespace and works nicely with converting the sexp to a
value.
* default is [false]
* default is [false]
* default is [true]
* default is [false]
* default is [false]
* default is [true]
* [save_sexps_conv] is like [save_sexps], but converts to sexps internally, one at a
time. This avoids allocating the list of sexps up front, which can be costly. The
default values of the parameters are the same as [save_sexps].
* default is [false]
* default is [false]
* [save_bin_prot t bin_writer 'a] is a special case of [with_file_atomic] that writes
['a] to [t] using its bin_writer, in the
size-prefixed format, like [write_bin_prot]. To read a file produced using
[save_bin_prot], one would typically use [Reader.load_bin_prot].
* default is [false]
* default is [false]
* [pipe t] returns the writing end of a pipe attached to [t] that pushes back when [t]
cannot keep up with the data being pushed in. Closing the pipe does not close [t].
* defaults to [stdout; stderr]
* [using_synchronous_backing_out_channel t = true] if writes to [t] are being done
synchronously, e.g., due to [set_synchronous_out_channel],
[set_synchronous_backing_out_channel], [use_synchronous_stdout_and_stderr].
* [clear_synchronous_out_channel t] restores [t] to its normal state, with the
background writer asynchronously feeding data to the OS.
[clear_synchronous_out_channel] is idempotent.
* [Backing_out_channel] generalizes [Out_channel] to a narrow interface that can be used
to collect strings, etc.
*/*
* [of_pipe info pipe_w] returns a writer [t] such that data written to [t] will appear
on [pipe_w]. If either [t] or [pipe_w] are closed, the other is closed as well.
[of_pipe] is implemented by attaching [t] to the write-end of a Unix pipe, and
shuttling bytes from the read-end of the Unix pipe to [pipe_w].
* default is [Time_source.wall_clock ()] | open! Core
open! Import
module type Writer0 = sig
* [ Writer ] is Async 's main API for output to a file descriptor . It is the analog of
[ Core . Out_channel ] .
Each writer has an internal buffer , to which [ Writer.write * ] adds data . Each writer
uses an Async cooperative thread that makes [ write ( ) ] system calls to move the data
from the writer 's buffer to an OS buffer via the file descriptor .
There is no guarantee that the data sync on the other side of the writer can keep up
with the rate at which you are writing . If it can not , the OS buffer will fill up and
the writer 's cooperative thread will be unable to send any bytes . In that case , calls
to [ Writer.write * ] will grow the writer 's buffer without bound , as long as your
program produces data . One solution to this problem is to call [ Writer.flushed ] and
not continue until that becomes determined , which will only happen once the bytes in
the writer 's buffer have been successfully transferred to the OS buffer . Another
solution is to check [ Writer.bytes_to_write ] and not produce any more data if that is
beyond some bound .
There are two kinds of errors that one can handle with writers . First , a writer can be
[ close]d , which will cause future [ write]s ( and other operations ) to synchronously
raise an exception . Second , the writer 's cooperative thread can fail due to a
[ write ( ) ] system call failing . This will cause an exception to be sent to the writer 's
monitor , which will be a child of the monitor in effect when the writer is created .
One can deal with such asynchronous exceptions in the usual way , by handling the
stream returned by [ Monitor.detach_and_get_error_stream ( Writer.monitor writer ) ] .
[Core.Out_channel].
Each writer has an internal buffer, to which [Writer.write*] adds data. Each writer
uses an Async cooperative thread that makes [write()] system calls to move the data
from the writer's buffer to an OS buffer via the file descriptor.
There is no guarantee that the data sync on the other side of the writer can keep up
with the rate at which you are writing. If it cannot, the OS buffer will fill up and
the writer's cooperative thread will be unable to send any bytes. In that case, calls
to [Writer.write*] will grow the writer's buffer without bound, as long as your
program produces data. One solution to this problem is to call [Writer.flushed] and
not continue until that becomes determined, which will only happen once the bytes in
the writer's buffer have been successfully transferred to the OS buffer. Another
solution is to check [Writer.bytes_to_write] and not produce any more data if that is
beyond some bound.
There are two kinds of errors that one can handle with writers. First, a writer can be
[close]d, which will cause future [write]s (and other operations) to synchronously
raise an exception. Second, the writer's cooperative thread can fail due to a
[write()] system call failing. This will cause an exception to be sent to the writer's
monitor, which will be a child of the monitor in effect when the writer is created.
One can deal with such asynchronous exceptions in the usual way, by handling the
stream returned by [Monitor.detach_and_get_error_stream (Writer.monitor writer)]. *)
module Id : Unique_id
module Line_ending : sig
type t =
| Dos
| Unix
[@@deriving sexp_of]
end
type t [@@deriving sexp_of]
include Invariant.S with type t := t
* [ stdout ] and [ stderr ] are writers for file descriptors 1 and 2 . They are lazy because
we do n't want to create them in all programs that happen to link with Async .
When either [ stdout ] or [ stderr ] is created , they both are created . Furthermore , if
they point to the same inode , then they will be the same writer to [ Fd.stdout ] . This
can be confusing , because [ fd ( force stderr ) ] will be [ Fd.stdout ] , not [ Fd.stderr ] .
And subsequent modifications of [ Fd.stderr ] will have no effect on [ Writer.stderr ] .
Unfortunately , the sharing is necessary because Async uses OS threads to do [ write ( ) ]
syscalls using the writer buffer . When calling a program that redirects stdout and
stderr to the same file , as in :
{ v
foo.exe > /tmp / z.file 2>&1
v }
if [ Writer.stdout ] and [ Writer.stderr ] were n't the same writer , then they could have
threads simultaneously writing to the same file , which could easily cause data
loss .
we don't want to create them in all programs that happen to link with Async.
When either [stdout] or [stderr] is created, they both are created. Furthermore, if
they point to the same inode, then they will be the same writer to [Fd.stdout]. This
can be confusing, because [fd (force stderr)] will be [Fd.stdout], not [Fd.stderr].
And subsequent modifications of [Fd.stderr] will have no effect on [Writer.stderr].
Unfortunately, the sharing is necessary because Async uses OS threads to do [write()]
syscalls using the writer buffer. When calling a program that redirects stdout and
stderr to the same file, as in:
{v
foo.exe >/tmp/z.file 2>&1
v}
if [Writer.stdout] and [Writer.stderr] weren't the same writer, then they could have
threads simultaneously writing to the same file, which could easily cause data
loss. *)
val stdout : t Lazy.t
val stderr : t Lazy.t
type buffer_age_limit =
[ `At_most of Time.Span.t
| `Unlimited
]
[@@deriving bin_io, sexp]
* [ create ? buf_len ? syscall ? buffer_age_limit fd ] creates a new writer . The file
descriptor [ fd ] should not be in use for writing by anything else .
By default , a write system call occurs at the end of a cycle in which bytes were
written . One can supply [ ~syscall:(`Periodic span ) ] to get better performance . This
batches writes together , doing the write system call periodically according to the
supplied span .
A writer can asynchronously fail if the underlying write syscall returns an error ,
e.g. , [ EBADF ] , [ EPIPE ] , [ ] , ....
[ buffer_age_limit ] specifies how backed up you can get before raising an exception .
The default is [ ` Unlimited ] for files , and 2 minutes for other kinds of file
descriptors . You can supply [ ` Unlimited ] to turn off buffer - age checks .
[ raise_when_consumer_leaves ] specifies whether the writer should raise an exception
when the consumer receiving bytes from the writer leaves , i.e. , in Unix , the write
syscall returns [ EPIPE ] or [ ] . If [ not raise_when_consumer_leaves ] , then
the writer will silently drop all writes after the consumer leaves , and the writer
will eventually fail with a writer - buffer - older - than error if the application remains
open long enough .
[ line_ending ] determines how [ newline ] and [ write_line ] terminate lines by default .
If [ line_ending = Unix ] then end of line is [ " \n " ] ; if [ line_ending = Dos ] then end of
line is [ " \r\n " ] . Note that [ line_ending = Dos ] is not equivalent to opening the file
in text mode because any " \n " characters being printed by other means ( e.g. , [ write
" \n " ] ) are still written verbatim ( in Unix style ) .
[ time_source ] is useful in tests to trigger [ buffer_age_limit]-related conditions , or
simply to have the result of ( for example ) [ flushed_time_ns ] agree with your test 's
synthetic time . It is also used to schedule the [ ` Periodic ] syscalls .
descriptor [fd] should not be in use for writing by anything else.
By default, a write system call occurs at the end of a cycle in which bytes were
written. One can supply [~syscall:(`Periodic span)] to get better performance. This
batches writes together, doing the write system call periodically according to the
supplied span.
A writer can asynchronously fail if the underlying write syscall returns an error,
e.g., [EBADF], [EPIPE], [ECONNRESET], ....
[buffer_age_limit] specifies how backed up you can get before raising an exception.
The default is [`Unlimited] for files, and 2 minutes for other kinds of file
descriptors. You can supply [`Unlimited] to turn off buffer-age checks.
[raise_when_consumer_leaves] specifies whether the writer should raise an exception
when the consumer receiving bytes from the writer leaves, i.e., in Unix, the write
syscall returns [EPIPE] or [ECONNRESET]. If [not raise_when_consumer_leaves], then
the writer will silently drop all writes after the consumer leaves, and the writer
will eventually fail with a writer-buffer-older-than error if the application remains
open long enough.
[line_ending] determines how [newline] and [write_line] terminate lines by default.
If [line_ending = Unix] then end of line is ["\n"]; if [line_ending = Dos] then end of
line is ["\r\n"]. Note that [line_ending = Dos] is not equivalent to opening the file
in text mode because any "\n" characters being printed by other means (e.g., [write
"\n"]) are still written verbatim (in Unix style).
[time_source] is useful in tests to trigger [buffer_age_limit]-related conditions, or
simply to have the result of (for example) [flushed_time_ns] agree with your test's
synthetic time. It is also used to schedule the [`Periodic] syscalls. *)
val create
: ?buf_len:int
-> ?syscall:[ `Per_cycle | `Periodic of Time.Span.t ]
-> ?buffer_age_limit:buffer_age_limit
-> ?time_source:[> read ] Time_source.T1.t
-> Fd.t
-> t
val raise_when_consumer_leaves : t -> bool
* [ set_raise_when_consumer_leaves t bool ] sets the [ raise_when_consumer_leaves ] flag of
[ t ] , which determies how [ t ] responds to a write system call raising [ EPIPE ] and
[ ] ( see [ create ] ) .
[t], which determies how [t] responds to a write system call raising [EPIPE] and
[ECONNRESET] (see [create]). *)
val set_raise_when_consumer_leaves : t -> bool -> unit
val set_buffer_age_limit : t -> buffer_age_limit -> unit
val consumer_left : t -> unit Deferred.t
val of_out_channel : Out_channel.t -> Fd.Kind.t -> t
val open_file
-> ?buf_len:int
-> ?syscall:[ `Per_cycle | `Periodic of Time.Span.t ]
* default is [ 0o666 ]
-> ?time_source:[> read ] Time_source.T1.t
-> string
-> t Deferred.t
* [ with_file ~file f ] opens [ file ] for writing , creates a writer [ t ] , and runs [ f t ] to
obtain a deferred [ d ] . When [ d ] becomes determined , the writer is closed . When the
close completes , the result of [ with_file ] becomes determined with the value of [ d ] .
There is no need to call [ Writer.flushed ] to ensure that [ with_file ] waits for the
writer to be flushed before closing it . [ Writer.close ] will already wait for the
flush .
[ exclusive = true ] uses a filesystem lock to try and make sure that the file is not
modified during a concurrent read or write operation . This is an advisory lock , which
means that the reader must be cooperating by taking a relevant lock when writing ( see
[ Reader.with_file ] ) . This is unrelated and should not be confused with the [ O_EXCL ]
flag in [ open ] systemcall . Note that the implementation uses [ Unix.lockf ] , which has
known pitfalls . It 's recommended that you avoid the [ exclusive ] flag in favor of
using a library dedicated to dealing with file locks where the pitfalls can be
documented in detail .
obtain a deferred [d]. When [d] becomes determined, the writer is closed. When the
close completes, the result of [with_file] becomes determined with the value of [d].
There is no need to call [Writer.flushed] to ensure that [with_file] waits for the
writer to be flushed before closing it. [Writer.close] will already wait for the
flush.
[exclusive = true] uses a filesystem lock to try and make sure that the file is not
modified during a concurrent read or write operation. This is an advisory lock, which
means that the reader must be cooperating by taking a relevant lock when writing (see
[Reader.with_file]). This is unrelated and should not be confused with the [O_EXCL]
flag in [open] systemcall. Note that the implementation uses [Unix.lockf], which has
known pitfalls. It's recommended that you avoid the [exclusive] flag in favor of
using a library dedicated to dealing with file locks where the pitfalls can be
documented in detail.
*)
val with_file
* default is [ 0o666 ]
-> ?syscall:[ `Per_cycle | `Periodic of Time.Span.t ]
-> ?time_source:[> read ] Time_source.T1.t
-> string
-> f:(t -> 'a Deferred.t)
-> 'a Deferred.t
val id : t -> Id.t
val fd : t -> Fd.t
* [ set_fd t fd ] sets the [ fd ] used by [ t ] for its underlying system calls . It first
waits until everything being sent to the current [ fd ] is flushed . Of course , one must
understand how the writer works and what one is doing to use this .
waits until everything being sent to the current [fd] is flushed. Of course, one must
understand how the writer works and what one is doing to use this. *)
val set_fd : t -> Fd.t -> unit Deferred.t
* [ write_gen t a ] writes [ a ] to writer [ t ] , with [ length ] specifying the number of bytes
needed and [ blit_to_bigstring ] blitting [ a ] directly into the [ t ] 's buffer . If one
has a type that has [ length ] and [ blit_to_bigstring ] functions , like :
{ [
module A : sig
type t
val length : t - > int
val blit_to_bigstring : ( t , Bigstring.t ) Blit.blit
end ] }
then one can use [ write_gen ] to implement a custom analog of [ Writer.write ] , like :
{ [
module Write_a : sig
write : ? pos : int - > ? len : int - > A.t - > Writer.t - > unit
end = struct
let write ? pos ? len a writer =
Writer.write_gen
: A.length
~blit_to_bigstring : A.blit_to_bigstring
? pos ? len writer a
end ] }
In some cases it may be difficult to write only part of a value :
{ [
module B : sig
type t
val length : t - > int
val blit_to_bigstring : t - > Bigstring.t - > pos : int - > unit
end ] }
In these cases , use [ write_gen_whole ] instead . It never requires writing only part of
a value , although it is potentially less space - efficient . It may waste portions of
previously - allocated write buffers if they are too small .
{ [
module Write_b : sig
write : B.t - > Writer.t - > unit
end = struct
let write b writer =
Writer.write_gen_whole
: B.length
~blit_to_bigstring : B.blit_to_bigstring
writer b
end ] }
Note : [ write_gen ] and [ write_gen_whole ] give you access to the writer 's internal
buffer . You should not capture it ; doing so might lead to errors of the segfault
kind .
needed and [blit_to_bigstring] blitting [a] directly into the [t]'s buffer. If one
has a type that has [length] and [blit_to_bigstring] functions, like:
{[
module A : sig
type t
val length : t -> int
val blit_to_bigstring : (t, Bigstring.t) Blit.blit
end ]}
then one can use [write_gen] to implement a custom analog of [Writer.write], like:
{[
module Write_a : sig
val write : ?pos:int -> ?len:int -> A.t -> Writer.t -> unit
end = struct
let write ?pos ?len a writer =
Writer.write_gen
~length:A.length
~blit_to_bigstring:A.blit_to_bigstring
?pos ?len writer a
end ]}
In some cases it may be difficult to write only part of a value:
{[
module B : sig
type t
val length : t -> int
val blit_to_bigstring : t -> Bigstring.t -> pos:int -> unit
end ]}
In these cases, use [write_gen_whole] instead. It never requires writing only part of
a value, although it is potentially less space-efficient. It may waste portions of
previously-allocated write buffers if they are too small.
{[
module Write_b : sig
val write : B.t -> Writer.t -> unit
end = struct
let write b writer =
Writer.write_gen_whole
~length:B.length
~blit_to_bigstring:B.blit_to_bigstring
writer b
end ]}
Note: [write_gen] and [write_gen_whole] give you access to the writer's internal
buffer. You should not capture it; doing so might lead to errors of the segfault
kind. *)
val write_gen
: ?pos:int
-> ?len:int
-> t
-> 'a
-> blit_to_bigstring:
(src:'a -> src_pos:int -> dst:Bigstring.t -> dst_pos:int -> len:int -> unit)
-> length:('a -> int)
-> unit
val write_gen_whole
: t
-> 'a
-> blit_to_bigstring:('a -> Bigstring.t -> pos:int -> unit)
-> length:('a -> int)
-> unit
val write_direct : t -> f:(Bigstring.t -> pos:int -> len:int -> 'a * int) -> 'a option
val write_bytes : ?pos:int -> ?len:int -> t -> Bytes.t -> unit
val write : ?pos:int -> ?len:int -> t -> string -> unit
val write_bigstring : ?pos:int -> ?len:int -> t -> Bigstring.t -> unit
val write_iobuf : ?pos:int -> ?len:int -> t -> ([> read ], _) Iobuf.t -> unit
val write_substring : t -> Substring.t -> unit
val write_bigsubstring : t -> Bigsubstring.t -> unit
val writef : t -> ('a, unit, string, unit) format4 -> 'a
val to_formatter : t -> Format.formatter
val write_char : t -> char -> unit
val newline : ?line_ending:Line_ending.t -> t -> unit
val write_line : ?line_ending:Line_ending.t -> t -> string -> unit
(** [write_byte t i] writes one 8-bit integer (as the single character with that code).
    The given integer is taken modulo 256. *)
val write_byte : t -> int -> unit
module Terminate_with : sig
type t =
| Newline
| Space_if_needed
[@@deriving sexp_of]
end
val write_sexp
-> t
-> Sexp.t
-> unit
(** [write_bin_prot] writes out a value using its bin_prot sizer/writer pair.  The format
    is the "size-prefixed binary protocol", in which the length of the data is written
    before the data itself.  This is the format that [Reader.read_bin_prot] reads. *)
val write_bin_prot : t -> 'a Bin_prot.Type_class.writer -> 'a -> unit
* Writes out a value using its bin_prot writer . Unlike [ write_bin_prot ] , this does n't
prefix the output with the size of the bin_prot blob . [ size ] is the expected size .
This function will raise if the bin_prot writer writes an amount other than [ size ]
bytes .
prefix the output with the size of the bin_prot blob. [size] is the expected size.
This function will raise if the bin_prot writer writes an amount other than [size]
bytes. *)
val write_bin_prot_no_size_header
: t
-> size:int
-> 'a Bin_prot.Write.writer
-> 'a
-> unit
* Unlike the [ write _ ] functions , all functions starting with [ schedule _ ] require
flushing or closing of the writer after returning before it is safe to modify the
bigstrings which were directly or indirectly passed to these functions . The reason is
that these bigstrings will be read from directly when writing ; their contents is not
copied to internal buffers .
This is important if users need to send the same large data string to a huge number of
clients simultaneously ( e.g. , on a cluster ) , because these functions then avoid
needlessly exhausting memory by sharing the data .
flushing or closing of the writer after returning before it is safe to modify the
bigstrings which were directly or indirectly passed to these functions. The reason is
that these bigstrings will be read from directly when writing; their contents is not
copied to internal buffers.
This is important if users need to send the same large data string to a huge number of
clients simultaneously (e.g., on a cluster), because these functions then avoid
needlessly exhausting memory by sharing the data. *)
* [ schedule_bigstring t bstr ] schedules a write of ] . It is not safe to
change the bigstring until the writer has been successfully flushed or closed after
this operation .
change the bigstring until the writer has been successfully flushed or closed after
this operation. *)
val schedule_bigstring : t -> ?pos:int -> ?len:int -> Bigstring.t -> unit
val schedule_bigsubstring : t -> Bigsubstring.t -> unit
val schedule_iobuf_peek : t -> ?pos:int -> ?len:int -> ([> read ], _) Iobuf.t -> unit
val schedule_iobuf_consume
: t
-> ?len:int
-> ([> read ], Iobuf.seek) Iobuf.t
-> unit Deferred.t
module Destroy_or_keep : sig
type t =
| Destroy
| Keep
[@@deriving sexp_of]
end
* [ schedule_iovec t iovec ] schedules a write of I / O - vector [ ] . It is not safe to
change the bigstrings underlying the I / O - vector until the writer has been successfully
flushed or closed after this operation .
change the bigstrings underlying the I/O-vector until the writer has been successfully
flushed or closed after this operation. *)
val schedule_iovec
-> t
-> Bigstring.t Unix.IOVec.t
-> unit
* [ schedule_iovecs t iovecs ] like { ! } , but takes a whole queue [ iovecs ] of
I / O - vectors as argument . The queue is guaranteed to be empty when this function
returns and can be modified . It is not safe to change the bigstrings underlying the
I / O - vectors until the writer has been successfully flushed or closed after this
operation .
I/O-vectors as argument. The queue is guaranteed to be empty when this function
returns and can be modified. It is not safe to change the bigstrings underlying the
I/O-vectors until the writer has been successfully flushed or closed after this
operation. *)
val schedule_iovecs : t -> Bigstring.t Unix.IOVec.t Queue.t -> unit
module Flush_result : sig
type t =
| Error
| Consumer_left
* [ Consumer_left ] is returned when the consumer leaves ( see { ! consumer_left } ) and
{ ! is set to [ false ] . If that flag is set to [ true ] ,
then you get an [ Error ] instead .
{!raise_when_consumer_leaves} is set to [false]. If that flag is set to [true],
then you get an [Error] instead. *)
| Flushed of Time_ns.t
[@@deriving sexp_of]
end
* [ flushed_or_failed_with_result t ] returns a deferred that will become determined when
all prior writes complete ( i.e. the [ write ( ) ] system call returns ) , or when any of
them fail .
Handling the [ Error ] case can be tricky due to the following race : the result gets
determined concurrently with the exception propagation through the writer 's monitor .
The caller needs to make sure that the program behavior does not depend on which
signal propagates first .
all prior writes complete (i.e. the [write()] system call returns), or when any of
them fail.
Handling the [Error] case can be tricky due to the following race: the result gets
determined concurrently with the exception propagation through the writer's monitor.
The caller needs to make sure that the program behavior does not depend on which
signal propagates first.
*)
val flushed_or_failed_with_result : t -> Flush_result.t Deferred.t
val flushed_or_failed_unit : t -> unit Deferred.t
val flushed : t -> unit Deferred.t
val flushed_time : t -> Time.t Deferred.t
val flushed_time_ns : t -> Time_ns.t Deferred.t
val fsync : t -> unit Deferred.t
val fdatasync : t -> unit Deferred.t
val send : t -> string -> unit
val monitor : t -> Monitor.t
* [ close ? force_close t ] waits for the writer to be flushed , and then calls [ Unix.close ]
on the underlying file descriptor . [ force_close ] causes the [ Unix.close ] to happen
even if the flush hangs . By default [ force_close ] is [ Deferred.never ( ) ] for files
and [ after ( sec 5 ) ] for other types of file descriptors ( e.g. , sockets ) . If the close
is forced , data in the writer 's buffer may not be written to the file descriptor . You
can check this by calling [ bytes_to_write ] after [ close ] finishes .
WARNING : [ force_close ] will not reliably stop any write that is in progress .
If there are any in - flight system calls , it will wait for them to finish , which
includes [ writev ] , which can legitimately block forever .
[ close ] will raise an exception if the [ Unix.close ] on the underlying file descriptor
fails .
You must call [ close ] on a writer in order to close the underlying file descriptor .
Not doing so will cause a file descriptor leak . It also will cause a space leak ,
because until the writer is closed , it is held on to in order to flush the writer on
shutdown .
It is an error to call other operations on [ t ] after [ close t ] has been called , except
that calls of [ close ] subsequent to the original call to [ close ] will return the same
deferred as the original call .
[ close_started t ] becomes determined as soon as [ close ] is called .
[ close_finished t ] becomes determined after [ t ] 's underlying file descriptor has been
closed , i.e. , it is the same as the result of [ close ] . [ close_finished ] differs from
[ close ] in that it does not have the side effect of initiating a close .
[ is_closed t ] returns [ true ] iff [ close t ] has been called .
[ is_open t ] is [ not ( is_closed t ) ]
[ with_close t ~f ] runs [ f ( ) ] , and closes [ t ] after [ f ] finishes or raises .
on the underlying file descriptor. [force_close] causes the [Unix.close] to happen
even if the flush hangs. By default [force_close] is [Deferred.never ()] for files
and [after (sec 5)] for other types of file descriptors (e.g., sockets). If the close
is forced, data in the writer's buffer may not be written to the file descriptor. You
can check this by calling [bytes_to_write] after [close] finishes.
WARNING: [force_close] will not reliably stop any write that is in progress.
If there are any in-flight system calls, it will wait for them to finish, which
includes [writev], which can legitimately block forever.
[close] will raise an exception if the [Unix.close] on the underlying file descriptor
fails.
You must call [close] on a writer in order to close the underlying file descriptor.
Not doing so will cause a file descriptor leak. It also will cause a space leak,
because until the writer is closed, it is held on to in order to flush the writer on
shutdown.
It is an error to call other operations on [t] after [close t] has been called, except
that calls of [close] subsequent to the original call to [close] will return the same
deferred as the original call.
[close_started t] becomes determined as soon as [close] is called.
[close_finished t] becomes determined after [t]'s underlying file descriptor has been
closed, i.e., it is the same as the result of [close]. [close_finished] differs from
[close] in that it does not have the side effect of initiating a close.
[is_closed t] returns [true] iff [close t] has been called.
[is_open t] is [not (is_closed t)]
[with_close t ~f] runs [f ()], and closes [t] after [f] finishes or raises. *)
val close : ?force_close:unit Deferred.t -> t -> unit Deferred.t
val close_started : t -> unit Deferred.t
val close_finished : t -> unit Deferred.t
val is_closed : t -> bool
val is_open : t -> bool
val with_close : t -> f:(unit -> 'a Deferred.t) -> 'a Deferred.t
val can_write : t -> bool
val is_stopped_permanently : t -> bool
val stopped_permanently : t -> unit Deferred.t
val with_flushed_at_close
: t
-> flushed:(unit -> unit Deferred.t)
-> f:(unit -> 'a Deferred.t)
-> 'a Deferred.t
val bytes_to_write : t -> int
val bytes_written : t -> Int63.t
val bytes_received : t -> Int63.t
* [ with_file_atomic ? temp_file ? perm ? fsync ? replace_special file ~f ] creates a writer
to a temp file , feeds that writer to [ f ] , and when the result of [ f ] becomes
determined , atomically moves ( using [ Unix.rename ] ) the temp file to [ file ] . If [ file ]
currently exists and is a regular file ( see below regarding [ replace_special ] ) it will
be replaced , even if it is read - only .
The temp file will be [ file ] ( or [ temp_file ] if supplied ) suffixed by a unique random
sequence of six characters . The temp file will be removed if an exception is raised to
the monitor of [ f ] before the result of [ f ] becomes determined . However , if the
program exits for some other reason , the temp file may not be cleaned up ; so it may be
prudent to choose a temp file that can be easily found by cleanup tools .
If [ fsync ] is [ true ] , the temp file will be flushed to disk before it takes the place
of the target file , thus guaranteeing that the target file will always be in a sound
state , even after a machine crash . Since synchronization is extremely slow , this is
not the default . Think carefully about the event of machine crashes and whether you
may need this option !
If [ replace_special ] is [ false ] ( the default ) an existing special [ file ] ( block or
character device , socket or FIFO ) will not be replaced by a regular file , the
temporary file is not created and an exception is raised . To explicitly replace an
existing special [ file ] , [ replace_special ] must be passed as [ true ] . Note that
if [ file ] exists and is a directory , the rename will fail ; if [ file ] exists and is
a symbolic link , the link will be replaced , not the target ( as per [ Unix.rename ] ) .
We intend for [ with_file_atomic ] to mimic the behavior of the [ open ] system call , so
if [ file ] does not exist , we will apply the current umask to [ perm ] ( the effective
permissions become [ perm land lnot umask ] , see [ man 2 open ] ) . However , if [ file ] does
exist and [ perm ] is specified , we do something different from [ open ] system call : we
override the permission with [ perm ] , ignoring the umask . This means that if you
create and then immediately overwrite the file with [ with_file_atomic ~perm ] , then the
umask will be honored the first time and ignored the second time . If [ perm ] is not
specified , then any existing file permissions are preserved .
If [ f ] closes the writer passed to it , [ with_file_atomic ] raises and does not create
[ file ] .
to a temp file, feeds that writer to [f], and when the result of [f] becomes
determined, atomically moves (using [Unix.rename]) the temp file to [file]. If [file]
currently exists and is a regular file (see below regarding [replace_special]) it will
be replaced, even if it is read-only.
The temp file will be [file] (or [temp_file] if supplied) suffixed by a unique random
sequence of six characters. The temp file will be removed if an exception is raised to
the monitor of [f] before the result of [f] becomes determined. However, if the
program exits for some other reason, the temp file may not be cleaned up; so it may be
prudent to choose a temp file that can be easily found by cleanup tools.
If [fsync] is [true], the temp file will be flushed to disk before it takes the place
of the target file, thus guaranteeing that the target file will always be in a sound
state, even after a machine crash. Since synchronization is extremely slow, this is
not the default. Think carefully about the event of machine crashes and whether you
may need this option!
If [replace_special] is [false] (the default) an existing special [file] (block or
character device, socket or FIFO) will not be replaced by a regular file, the
temporary file is not created and an exception is raised. To explicitly replace an
existing special [file], [replace_special] must be passed as [true]. Note that
if [file] exists and is a directory, the rename will fail; if [file] exists and is
a symbolic link, the link will be replaced, not the target (as per [Unix.rename]).
We intend for [with_file_atomic] to mimic the behavior of the [open] system call, so
if [file] does not exist, we will apply the current umask to [perm] (the effective
permissions become [perm land lnot umask], see [man 2 open]). However, if [file] does
exist and [perm] is specified, we do something different from [open] system call: we
override the permission with [perm], ignoring the umask. This means that if you
create and then immediately overwrite the file with [with_file_atomic ~perm], then the
umask will be honored the first time and ignored the second time. If [perm] is not
specified, then any existing file permissions are preserved.
If [f] closes the writer passed to it, [with_file_atomic] raises and does not create
[file].
*)
val with_file_atomic
: ?temp_file:string
-> ?perm:Unix.file_perm
-> ?time_source:[> read ] Time_source.T1.t
-> string
-> f:(t -> 'a Deferred.t)
-> 'a Deferred.t
val save
: ?temp_file:string
-> ?perm:Unix.file_perm
-> string
-> contents:string
-> unit Deferred.t
val save_lines
: ?temp_file:string
-> ?perm:Unix.file_perm
-> string
-> string list
-> unit Deferred.t
val save_sexp
: ?temp_file:string
-> ?perm:Unix.file_perm
-> string
-> Sexp.t
-> unit Deferred.t
* [ save_sexps ] works similarly to [ save_sexp ] , but saves a sequence of sexps instead ,
separated by newlines . There is a corresponding [ ] for reading back
in .
separated by newlines. There is a corresponding [Reader.load_sexps] for reading back
in. *)
val save_sexps
: ?temp_file:string
-> ?perm:Unix.file_perm
-> string
-> Sexp.t list
-> unit Deferred.t
val save_sexps_conv
: ?temp_file:string
-> ?perm:int
-> ?hum:bool
-> string
-> 'a list
-> ('a -> Sexp.t)
-> unit Deferred.t
val save_bin_prot
: ?temp_file:string
-> ?perm:Unix.file_perm
-> string
-> 'a Bin_prot.Type_class.writer
-> 'a
-> unit Deferred.t
* [ transfer ' t pipe_r f ] repeatedly reads values from [ pipe_r ] and feeds them to [ f ] ,
which should in turn write them to [ t ] . It provides pushback to [ pipe_r ] by not
reading when [ t ] can not keep up with the data being pushed in .
By default , each read from [ pipe_r ] reads all the values in [ pipe_r ] . One can supply
[ max_num_values_per_read ] to limit the number of values per read .
The [ transfer ' ] stops and the result becomes determined when [ stop ] becomes
determined , when [ pipe_r ] reaches its EOF , when [ t ] is closed , or when [ t ] 's consumer
leaves . In the latter two cases , [ transfer ' ] closes [ pipe_r ] .
[ transfer ' ] causes [ Pipe.flushed ] on [ pipe_r ] 's writer to ensure that the bytes have
been flushed to [ t ] before returning . It also waits on [ Pipe.upstream_flushed ] at
shutdown .
[ transfer t pipe_r f ] is equivalent to :
{ [
transfer ' t pipe_r ( fun q - > Queue.iter q ~f ; return ( ) ) ] }
which should in turn write them to [t]. It provides pushback to [pipe_r] by not
reading when [t] cannot keep up with the data being pushed in.
By default, each read from [pipe_r] reads all the values in [pipe_r]. One can supply
[max_num_values_per_read] to limit the number of values per read.
The [transfer'] stops and the result becomes determined when [stop] becomes
determined, when [pipe_r] reaches its EOF, when [t] is closed, or when [t]'s consumer
leaves. In the latter two cases, [transfer'] closes [pipe_r].
[transfer'] causes [Pipe.flushed] on [pipe_r]'s writer to ensure that the bytes have
been flushed to [t] before returning. It also waits on [Pipe.upstream_flushed] at
shutdown.
[transfer t pipe_r f] is equivalent to:
{[
transfer' t pipe_r (fun q -> Queue.iter q ~f; return ()) ]} *)
val transfer'
: ?stop:unit Deferred.t
-> ?max_num_values_per_read:int
-> t
-> 'a Pipe.Reader.t
-> ('a Queue.t -> unit Deferred.t)
-> unit Deferred.t
val transfer
: ?stop:unit Deferred.t
-> ?max_num_values_per_read:int
-> t
-> 'a Pipe.Reader.t
-> ('a -> unit)
-> unit Deferred.t
val pipe : t -> string Pipe.Writer.t
* [ behave_nicely_in_pipeline ~writers ( ) ] causes the program to exit with status 141
( indicating ) if any of the consumers of [ writers ] go away . It also sets the
buffer age to unlimited , in case there is a human ( e.g. , using [ less ] ) on the other
side of the pipeline .
This can be called at the toplevel of a program , before [ Command.run ] for instance .
( this function does n't start the async scheduler ) .
(indicating SIGPIPE) if any of the consumers of [writers] go away. It also sets the
buffer age to unlimited, in case there is a human (e.g., using [less]) on the other
side of the pipeline.
This can be called at the toplevel of a program, before [Command.run] for instance.
(this function doesn't start the async scheduler). *)
val behave_nicely_in_pipeline
-> unit
-> unit
* [ set_synchronous_out_channel t out_channel ] waits until [ byte_to_write t = 0 ] , and
then mutates [ t ] so that all future writes to [ t ] synchronously call
[ Out_channel.output * ] functions to send data to the OS immediately .
[ set_synchronous_out_channel ] is used by expect tests to ensure that the interleaving
between calls to [ Core.printf ] ( and similar IO functions ) and [ Async.printf ] generates
output with the same interleaving . [ set_synchronous_out_channel ] is idempotent .
then mutates [t] so that all future writes to [t] synchronously call
[Out_channel.output*] functions to send data to the OS immediately.
[set_synchronous_out_channel] is used by expect tests to ensure that the interleaving
between calls to [Core.printf] (and similar IO functions) and [Async.printf] generates
output with the same interleaving. [set_synchronous_out_channel] is idempotent. *)
val set_synchronous_out_channel : t -> Out_channel.t -> unit Deferred.t
val using_synchronous_backing_out_channel : t -> bool
val clear_synchronous_out_channel : t -> unit
val with_synchronous_out_channel
: t
-> Out_channel.t
-> f:(unit -> 'a Deferred.t)
-> 'a Deferred.t
* [ use_synchronous_stdout_and_stderr ( ) ] causes all subsequent writes to
stdout and stderr to occur synchronously ( after any pending writes have
flushed ) .
This ensures [ printf]-family writes happen immediately , which avoids two
common sources of confusion :
{ ul
{ li unexpected interleaving of [ Core.printf ] and [ Async.printf ] calls ; and }
[ Async.printf ] calls that do n't get flushed before an application exits } }
The disadvantages are :
{ ul
this makes writes blocking , which can delay unrelated asynchronous jobs until
the consumer stops pushing back ; and }
the errors raised by write are different and it wo n't respect
{ ! behave_nicely_in_pipeline } anymore } }
stdout and stderr to occur synchronously (after any pending writes have
flushed).
This ensures [printf]-family writes happen immediately, which avoids two
common sources of confusion:
{ul
{li unexpected interleaving of [Core.printf] and [Async.printf] calls; and}
{li [Async.printf] calls that don't get flushed before an application exits}}
The disadvantages are:
{ul
{li this makes writes blocking, which can delay unrelated asynchronous jobs until
the consumer stops pushing back; and}
{li the errors raised by write are different and it won't respect
{!behave_nicely_in_pipeline} anymore}} *)
val use_synchronous_stdout_and_stderr : unit -> unit Deferred.t
module Backing_out_channel : sig
type t [@@deriving sexp_of]
val create
: output_char:(char -> unit)
-> output_chars:(bigstring -> len:int -> unit)
-> flush:(unit -> unit)
-> sexp:(unit -> Sexp.t)
-> t
val of_out_channel : Out_channel.t -> t
val of_output_char : (char -> unit) -> t
end
val set_synchronous_backing_out_channel : t -> Backing_out_channel.t -> unit Deferred.t
val with_synchronous_backing_out_channel
: t
-> Backing_out_channel.t
-> f:(unit -> 'a Deferred.t)
-> 'a Deferred.t
module Private : sig
module Check_buffer_age : sig
module Internal_for_unit_test : sig
val check_now : check_invariants:bool -> time_source:Time_source.t -> unit
val num_active_checks_for : Time_source.t -> int option
end
end
val set_bytes_received : t -> Int63.t -> unit
val set_bytes_written : t -> Int63.t -> unit
end
end
module type Writer = sig
include Writer0
val of_pipe
: ?time_source:[> read ] Time_source.T1.t
-> Info.t
-> string Pipe.Writer.t
-> (t * [ `Closed_and_flushed_downstream of unit Deferred.t ]) Deferred.t
end
|
7877fd7bc10733ac914121a338c6f3ff504f48e683380e775d59611864bc3c8c | gafiatulin/codewars | FakeBinary.hs | -- Fake Binary
module Codewars.Kata.FakeBinary where

-- | Map each character of the input to a pseudo-binary digit:
-- characters that compare below @'5'@ become @'0'@, everything
-- else becomes @'1'@.
fakeBin :: String -> String
fakeBin = map toBinaryDigit
  where
    toBinaryDigit :: Char -> Char
    toBinaryDigit ch
      | ch < '5'  = '0'
      | otherwise = '1'
| null | https://raw.githubusercontent.com/gafiatulin/codewars/535db608333e854be93ecfc165686a2162264fef/src/8%20kyu/FakeBinary.hs | haskell | Fake Binary |
module Codewars.Kata.FakeBinary where
fakeBin :: String -> String
fakeBin = map (\c -> if c < '5' then '0' else '1' )
|
8d55af5e13401e36d629bd26a2d10f632d38f1504b0e9635bf4d0cdc2d319c3b | Clozure/ccl | x86-numbers.lisp | -*- Mode : Lisp ; Package : CCL -*-
;;;
;;; Copyright 1994-2009 Clozure Associates
;;;
;;; Licensed under the Apache License, Version 2.0 (the "License");
;;; you may not use this file except in compliance with the License.
;;; You may obtain a copy of the License at
;;;
;;;     http://www.apache.org/licenses/LICENSE-2.0
;;;
;;; Unless required by applicable law or agreed to in writing, software
;;; distributed under the License is distributed on an "AS IS" BASIS,
;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;;; See the License for the specific language governing permissions and
;;; limitations under the License.
(in-package "CCL")
#+x8664-target
(progn
;;; Return the sign of fixnum NUMBER as a fixnum: -1 when negative,
;;; +1 when positive.  NOTE(review): as written, a zero argument also
;;; takes the CMOVNS path and returns 1, so the Lisp-level caller
;;; presumably screens out zero before calling this -- confirm against
;;; the definition of SIGNUM.
(defx86lapfunction %fixnum-signum ((number arg_z))
;; Preload both candidate (boxed) results.
(movq ($ '-1) (% arg_x))
(movq ($ '1) (% arg_y))
;; Set SF from NUMBER, then pick the result with conditional moves.
(testq (% number) (% number))
(cmovsq (% arg_x) (% arg_z))
(cmovnsq (% arg_y) (% arg_z))
(single-value-return))
;;; see %logcount.
;;; Count the 1 bits in fixnum NUMBER (popcount of the unboxed value),
;;; returned as a fixnum.  Uses the Kernighan trick: n := n AND (n-1)
;;; clears the lowest set bit, so the loop runs once per set bit.
;;; NOTE(review): a negative fixnum would be popcounted in its 64-bit
;;; two's-complement form -- presumably callers pass only non-negative
;;; values; confirm at the call sites.
(defx86lapfunction %ilogcount ((number arg_z))
(let ((rshift imm0)
(temp imm1))
(unbox-fixnum number rshift)
;; Result accumulator starts at boxed 0.
(xorq (% arg_z) (% arg_z))
;; Enter at @test so a zero input skips the loop entirely.
(testq (% rshift) (% rshift))
(jmp @test)
@next
(lea (@ -1 (% rshift)) (% temp))
(and (% temp) (% rshift)) ; sets flags
(lea (@ '1 (% arg_z)) (% arg_z)) ; doesn't set flags
@test
(jne @next)
(single-value-return)))
;;; Arithmetic shift of fixnum NUMBER by fixnum COUNT bits (as in ASH):
;;; negative COUNT shifts right arithmetically (preserving sign),
;;; non-negative COUNT shifts left.  NOTE(review): the left-shift path
;;; performs no overflow check, so the caller must guarantee the result
;;; fits in a fixnum -- confirm against the Lisp-level ASH.
(defx86lapfunction %iash ((number arg_y) (count arg_z))
(unbox-fixnum count imm1)
(unbox-fixnum number imm0)
(xorq (% rcx) (% rcx)) ;rcx = imm2
(testq (% count) (% count))
(jge @left)
;; Right shift: CL := -COUNT, SAR the unboxed value, then rebox.
(subb (% imm1.b) (% cl))
(sar (% cl) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return)
@left
;; Left shift: shifting the boxed fixnum directly preserves the tag.
(movb (% imm1.b) (% cl))
(shl (% cl) (% number))
(movq (% number) (% arg_z))
(single-value-return))
;;; Boxed floating-point zero constants, one per float representation.
(defparameter *double-float-zero* 0.0d0)
(defparameter *short-float-zero* 0.0s0)
;;; INTEGER-LENGTH for fixnums: the number of significant bits,
;;; excluding the sign bit.  A negative value is bitwise-complemented
;;; first, since (integer-length n) = (integer-length (lognot n)) for
;;; negative n.
(defx86lapfunction %fixnum-intlen ((number arg_z))
(unbox-fixnum arg_z imm0)
(movq (% imm0) (% imm1))
(notq (% imm1))
;; If NUMBER is negative, operate on its complement instead.
(testq (% imm0) (% imm0))
(cmovsq (% imm1) (% imm0))
;; BSR yields the index of the highest set bit and sets ZF when the
;; source is 0; SETNE then supplies the +1 that turns the bit index
;; into a length (and supplies 0 for a zero input).
;; NOTE(review): for input 0 this relies on BSR leaving the
;; destination (the same, already-zero register here) unmodified --
;; architecturally "undefined" on Intel; confirm this assumption.
(bsrq (% imm0) (% imm0))
(setne (% imm1.b))
(addb (% imm1.b) (% imm0.b))
(box-fixnum imm0 arg_z)
(single-value-return))
;;; Caller guarantees that result fits in a fixnum.
;;; Truncate a double-float toward zero and return the result as a
;;; fixnum (CVTTSD2SI is the truncating form of the conversion).
(defx86lapfunction %truncate-double-float->fixnum ((arg arg_z))
(get-double-float arg fp1)
(cvttsd2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
;;; Truncate a single-float toward zero and return the result as a
;;; fixnum.  As with the double-float case above, the caller
;;; guarantees that the result fits in a fixnum.
(defx86lapfunction %truncate-short-float->fixnum ((arg arg_z))
(get-single-float arg fp1)
(cvttss2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
;;; DOES round to even
;;; Convert a double-float to the nearest fixnum.  CVTSD2SI uses the
;;; current MXCSR rounding mode, which defaults to
;;; round-to-nearest-even.  Caller guarantees the result fits in a
;;; fixnum.
(defx86lapfunction %round-nearest-double-float->fixnum ((arg arg_z))
(get-double-float arg fp1)
(cvtsd2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
;;; Single-float analog of the above: convert to the nearest fixnum
;;; using the current (default round-to-nearest-even) MXCSR mode.
(defx86lapfunction %round-nearest-short-float->fixnum ((arg arg_z))
(get-single-float arg fp1)
(cvtss2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
;;; We'll get a SIGFPE if divisor is 0.
;;; Don't use %rbp. Trust callback_for_interrupt() to preserve
;;; the word below the stack pointer
;;; TRUNCATE for two fixnums: returns two values (quotient, remainder)
;;; through the .SPvalues subprim.  A divisor of -1 is special-cased
;;; because IDIV traps when the most-negative fixnum is divided by -1;
;;; on that path the quotient is simply -DIVIDEND, and when the NEG
;;; itself overflows the precomputed value of *least-positive-bignum*
;;; is substituted.  The remainder on that path is 0.
(defx86lapfunction %fixnum-truncate ((dividend arg_y) (divisor arg_z))
(save-simple-frame)
(cmpq ($ '-1) (% divisor))
(je @neg)
(unbox-fixnum divisor imm0)
(movq (% imm0) (% imm2))
(unbox-fixnum dividend imm0)
(cqto) ; imm1 := sign_extend(imm0)
(idivq (% imm2))
(pop (% rbp))
(movq (% rsp) (% temp0))
;; Box quotient (imm0) and remainder (imm1), push both, return 2 values.
(box-fixnum imm1 arg_y)
(box-fixnum imm0 arg_z)
(pushq (% arg_z))
(pushq (% arg_y))
(set-nargs 2)
(jmp-subprim .SPvalues)
@neg
;; divisor = -1: quotient is -DIVIDEND; if that negation overflows,
;; use the value of *least-positive-bignum* instead.  Remainder is 0.
(negq (% dividend))
(load-constant *least-positive-bignum* arg_z)
(cmovoq (@ x8664::symbol.vcell (% arg_z)) (% dividend))
(pop (% rbp))
(movq (% rsp) (% temp0))
(pushq (% dividend))
(pushq ($ 0))
(set-nargs 2)
(jmp-subprim .SPvalues))
;;; Return T iff the current function's caller was itself called for
;;; multiple values: the return address saved in the caller's frame is
;;; compared against the global ret1valaddr address, and equality
;;; selects T via the conditional move.
(defx86lapfunction called-for-mv-p ()
(ref-global ret1valaddr imm0)
(movq (@ x8664::lisp-frame.return-address (% rbp)) (% imm1))
(cmpq (% imm0) (% imm1))
(movq ($ t) (% imm0))
(movq ($ nil) (% arg_z))
(cmoveq (% imm0) (% arg_z))
(single-value-return))
;;; n1 and n2 must be positive (esp non zero)
;;; Binary (Stein) GCD of two positive fixnums.  The common
;;; power-of-two factor k is extracted first: BSF on the *boxed*
;;; operands counts trailing zeros including the fixnum tag bits, so
;;; subtracting x8664::fixnumshift gives the true count for the
;;; unboxed value.  k lives in imm2 (%rcx) so its low byte %cl can be
;;; used directly as the shift count.
(defx86lapfunction %fixnum-gcd ((boxed-u arg_y) (boxed-v arg_z))
(let ((u imm0)
(v imm1)
(k imm2))
(xorl (% imm2.l) (% imm2.l))
(bsfq (% boxed-u) (% u))
(bsfq (% boxed-v) (% v))
;; k := min(trailing-zeros(u), trailing-zeros(v))
(rcmp (% u) (% v))
(cmovlel (%l u) (%l k))
(cmovgl (%l v) (%l k))
(unbox-fixnum boxed-u u)
(unbox-fixnum boxed-v v)
;; Drop the tag-bit contribution; shift both operands down by k.
(subb ($ x8664::fixnumshift) (%b k))
(jz @start)
(shrq (% cl) (% u))
(shrq (% cl) (% v))
@start
;; At least one of u or v is odd at this point
@loop
;; if u is even, shift it right one bit
(testb ($ 1) (%b u))
(jne @u-odd)
(shrq ($ 1) (% u))
(jmp @test)
@u-odd
;; if v is even, shift it right one bit
(testb ($ 1) (%b v))
(jne @both-odd)
(shrq ($ 1) (% v))
(jmp @test-u)
@both-odd
;; Both odd: replace the larger by (larger - smaller)/2.
(cmpq (% v) (% u))
(jb @v>u)
(subq (% v) (% u))
(shrq ($ 1) (% u))
(jmp @test)
@v>u
(subq (% u) (% v))
(shrq ($ 1) (% v))
@test-u
(testq (% u) (% u))
@test
;; Loop until u reaches 0; the GCD of the odd parts is then in v.
(ja @loop)
;; Restore the common power-of-two factor, clear CL, box the result.
(shlq (% cl) (% v))
(movb ($ 0) (% cl))
(box-fixnum v arg_z)
(single-value-return)))
(defx86lapfunction %mrg31k3p ((state arg_z))
(let ((seed temp0)
(m1 #x7fffffff)
(m2 #x7fffadb3)
(negative-m1 #x80000001)
(negative-m2 #x8000524d))
(svref state 1 seed)
(movl (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)) (% imm0.l))
(andl ($ #x1ff) (% imm0.l))
(shll ($ 22) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)) (% imm1.l))
(shrl ($ 9) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)) (% imm1.l))
(andl ($ #xffffff) (% imm1.l))
(shll ($ 7) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)) (% imm1.l))
(shrl ($ 24) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(leal (@ negative-m1 (% imm0.l)) (% imm1.l))
(cmpl ($ m1) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(addl (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)) (% imm0.l))
(leal (@ negative-m1 (% imm0.l)) (% imm1.l))
(cmpl ($ m1) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
;; update state
(movl (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)))
(movl (@ (+ x8664::misc-data-offset (* 4 0)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)))
(movl (% imm0.l) (@ (+ x8664::misc-data-offset (* 4 0)) (% seed)))
second component
(movl (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)) (% imm0.l))
(andl ($ #xffff) (% imm0.l))
(shll ($ 15) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)) (% imm1.l))
(shrl ($ 16) (% imm1.l))
(imull ($ 21069) (% imm1.l) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(movl (% imm0.l) (% imm2.l)) ;stash t1
(movl (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)) (% imm0.l))
(andl ($ #xffff) (% imm0.l))
(shll ($ 15) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)) (% imm1.l))
(shrl ($ 16) (% imm1.l))
(imull ($ 21069) (% imm1.l) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(addl (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(addl (% imm2.l) (% imm0.l)) ;add in t1
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
;; update state
(movl (@ (+ x8664::misc-data-offset (* 4 4)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)))
(movl (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 4)) (% seed)))
(movl (% imm0.l) (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)))
;; combination
(movl (@ (+ x8664::misc-data-offset (* 4 0)) (% seed)) (% imm1.l))
(xchgl (% imm1.l) (% imm0.l)) ;for sanity
(rcmpl (% imm0.l) (% imm1.l))
(ja @ok)
(subl (% imm1.l) (% imm0.l))
(addl ($ m1) (% imm0.l))
(box-fixnum imm0 arg_z)
(single-value-return)
@ok
(subl (% imm1.l) (% imm0.l))
(box-fixnum imm0 arg_z)
(single-value-return)))
;;; These things (or something like them) should get inlined soon.
;;; Recall that (COMPLEX SINGLE-FLOAT) and (COMPLEX DOUBLE-FLOAT)
objects are viewed as having 32 - bit elements and are viewed
;; as having some extra words for alignment.
(defx86lapfunction %make-complex-double-float ((r arg_y) (i arg_z))
(movsd (@ target::misc-dfloat-offset (% r)) (% xmm0))
(movsd (@ target::misc-dfloat-offset (% i)) (% xmm1))
(unpcklpd (% xmm1) (% xmm0))
(movl ($ (logior (ash 6 x8664::num-subtag-bits) x8664::subtag-complex-double-float)) (%l imm0))
(movl ($ (- (* 3 16) x8664::fulltag-misc)) (%l imm1))
(subq (% imm1) (:rcontext x8664::tcr.save-allocptr))
(movq (:rcontext x8664::tcr.save-allocptr) (% allocptr))
(cmpq (:rcontext x8664::tcr.save-allocbase) (% allocptr))
(ja @no-trap)
(uuo-alloc)
@no-trap
(movq (% imm0) (@ x8664::misc-header-offset (% temp0)))
(andb ($ (lognot x8664::fulltagmask)) (:rcontext x8664::tcr.save-allocptr))
(movq (% allocptr) (% arg_z))
(movdqa (% xmm0) (@ x8664::complex-double-float.realpart (% arg_z)))
(single-value-return))
(defx86lapfunction %make-complex-single-float ((r arg_y) (i arg_z))
(movd (% r) (% xmm0))
(psrlq ($ 32) (% xmm0))
(movd (% i) (% xmm1))
(psrlq ($ 32) (% xmm1))
(unpcklps (% xmm1) (% xmm0))
(movl ($ (logior (ash 2 x8664::num-subtag-bits) x8664::subtag-complex-single-float)) (%l imm0))
(movl ($ (- (* 1 16) x8664::fulltag-misc)) (%l imm1))
(subq (% imm1) (:rcontext x8664::tcr.save-allocptr))
(movq (:rcontext x8664::tcr.save-allocptr) (% allocptr))
(cmpq (:rcontext x8664::tcr.save-allocbase) (% allocptr))
(ja @no-trap)
(uuo-alloc)
@no-trap
(movq (% imm0) (@ x8664::misc-header-offset (% temp0)))
(andb ($ (lognot x8664::fulltagmask)) (:rcontext x8664::tcr.save-allocptr))
(movq (% allocptr) (% arg_z))
(movq (% xmm0) (@ x8664::complex-single-float.realpart (% arg_z)))
(single-value-return))
Write the hex digits ( including leading zeros ) that represent the
;;; unsigned-byte contained in FIXNUM into STRING, which must be long
enough . END - IDX is the position in the string where the rightmost
hex digit should go . END - IDX should be 14 for ( unsigned - byte 60 ) ,
and 7 for ( unsigned - byte 32 ) . This function assumes that the
;;; starting index (for the leftmost hex digit) is 0.
(defx86lapfunction %ub-fixnum-hex-digits ((end-idx arg_x) (fixnum arg_y)
(string arg_z))
(let ((index imm1))
;; We want index to start out as the index into the string of the
rightmost hex digit . Thus , for ( unsigned - byte 60 ) , index will
be 14 . For ( unsigned - byte 32 ) , it will be 7 .
(unbox-fixnum end-idx index)
(unbox-fixnum fixnum imm2)
;; Fill in string, from right to left, with hex digits of fixnum.
@loop
(movl ($ #xf) (% imm0.l))
(andl (% imm2.l) (% imm0.l)) ;get low nibble
(cmpl ($ 10) (% imm0.l)) ;convert to char code
(jb @small)
extra for digits # \A through # \F
(addl ($ 7) (% imm0.l))
@small
48 is ( char - code # \0 )
(addl ($ 48) (% imm0.l))
(movl (% imm0.l) (@ x8664::misc-data-offset (% string) (% index) 4))
(sarq ($ 4) (% imm2)) ;shift in next lowest nibble
(subq ($ 1) (% index))
(jae @loop)
(single-value-return)))
;;; End of x86-numbers.lisp
) ; #+x8664-target
| null | https://raw.githubusercontent.com/Clozure/ccl/6c1a9458f7a5437b73ec227e989aa5b825f32fd3/level-0/X86/x86-numbers.lisp | lisp | Package : CCL -*-
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-2.0
Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
see %logcount.
sets flags
doesn't set flags
rcx = imm2
DOES round to even
We'll get a SIGFPE if divisor is 0.
Don't use %rbp. Trust callback_for_interrupt() to preserve
the word below the stack pointer
imm1 := sign_extend(imm0)
update state
stash t1
add in t1
update state
combination
for sanity
These things (or something like them) should get inlined soon.
Recall that (COMPLEX SINGLE-FLOAT) and (COMPLEX DOUBLE-FLOAT)
as having some extra words for alignment.
unsigned-byte contained in FIXNUM into STRING, which must be long
starting index (for the leftmost hex digit) is 0.
We want index to start out as the index into the string of the
Fill in string, from right to left, with hex digits of fixnum.
get low nibble
convert to char code
shift in next lowest nibble
End of x86-numbers.lisp
#+x8664-target | Copyright 1994 - 2009 Clozure Associates
distributed under the License is distributed on an " AS IS " BASIS ,
(in-package "CCL")
#+x8664-target
(progn
(defx86lapfunction %fixnum-signum ((number arg_z))
(movq ($ '-1) (% arg_x))
(movq ($ '1) (% arg_y))
(testq (% number) (% number))
(cmovsq (% arg_x) (% arg_z))
(cmovnsq (% arg_y) (% arg_z))
(single-value-return))
(defx86lapfunction %ilogcount ((number arg_z))
(let ((rshift imm0)
(temp imm1))
(unbox-fixnum number rshift)
(xorq (% arg_z) (% arg_z))
(testq (% rshift) (% rshift))
(jmp @test)
@next
(lea (@ -1 (% rshift)) (% temp))
@test
(jne @next)
(single-value-return)))
(defx86lapfunction %iash ((number arg_y) (count arg_z))
(unbox-fixnum count imm1)
(unbox-fixnum number imm0)
(testq (% count) (% count))
(jge @left)
(subb (% imm1.b) (% cl))
(sar (% cl) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return)
@left
(movb (% imm1.b) (% cl))
(shl (% cl) (% number))
(movq (% number) (% arg_z))
(single-value-return))
(defparameter *double-float-zero* 0.0d0)
(defparameter *short-float-zero* 0.0s0)
(defx86lapfunction %fixnum-intlen ((number arg_z))
(unbox-fixnum arg_z imm0)
(movq (% imm0) (% imm1))
(notq (% imm1))
(testq (% imm0) (% imm0))
(cmovsq (% imm1) (% imm0))
(bsrq (% imm0) (% imm0))
(setne (% imm1.b))
(addb (% imm1.b) (% imm0.b))
(box-fixnum imm0 arg_z)
(single-value-return))
Caller guarantees that result fits in a fixnum .
(defx86lapfunction %truncate-double-float->fixnum ((arg arg_z))
(get-double-float arg fp1)
(cvttsd2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
(defx86lapfunction %truncate-short-float->fixnum ((arg arg_z))
(get-single-float arg fp1)
(cvttss2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
(defx86lapfunction %round-nearest-double-float->fixnum ((arg arg_z))
(get-double-float arg fp1)
(cvtsd2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
(defx86lapfunction %round-nearest-short-float->fixnum ((arg arg_z))
(get-single-float arg fp1)
(cvtss2si (% fp1) (% imm0))
(box-fixnum imm0 arg_z)
(single-value-return))
(defx86lapfunction %fixnum-truncate ((dividend arg_y) (divisor arg_z))
(save-simple-frame)
(cmpq ($ '-1) (% divisor))
(je @neg)
(unbox-fixnum divisor imm0)
(movq (% imm0) (% imm2))
(unbox-fixnum dividend imm0)
(idivq (% imm2))
(pop (% rbp))
(movq (% rsp) (% temp0))
(box-fixnum imm1 arg_y)
(box-fixnum imm0 arg_z)
(pushq (% arg_z))
(pushq (% arg_y))
(set-nargs 2)
(jmp-subprim .SPvalues)
@neg
(negq (% dividend))
(load-constant *least-positive-bignum* arg_z)
(cmovoq (@ x8664::symbol.vcell (% arg_z)) (% dividend))
(pop (% rbp))
(movq (% rsp) (% temp0))
(pushq (% dividend))
(pushq ($ 0))
(set-nargs 2)
(jmp-subprim .SPvalues))
(defx86lapfunction called-for-mv-p ()
(ref-global ret1valaddr imm0)
(movq (@ x8664::lisp-frame.return-address (% rbp)) (% imm1))
(cmpq (% imm0) (% imm1))
(movq ($ t) (% imm0))
(movq ($ nil) (% arg_z))
(cmoveq (% imm0) (% arg_z))
(single-value-return))
n1 and n2 must be positive ( esp non zero )
(defx86lapfunction %fixnum-gcd ((boxed-u arg_y) (boxed-v arg_z))
(let ((u imm0)
(v imm1)
(k imm2))
(xorl (% imm2.l) (% imm2.l))
(bsfq (% boxed-u) (% u))
(bsfq (% boxed-v) (% v))
(rcmp (% u) (% v))
(cmovlel (%l u) (%l k))
(cmovgl (%l v) (%l k))
(unbox-fixnum boxed-u u)
(unbox-fixnum boxed-v v)
(subb ($ x8664::fixnumshift) (%b k))
(jz @start)
(shrq (% cl) (% u))
(shrq (% cl) (% v))
@start
At least one of u or v is odd at this point
@loop
if u is even , shift it right one bit
(testb ($ 1) (%b u))
(jne @u-odd)
(shrq ($ 1) (% u))
(jmp @test)
@u-odd
if v is even , shift it right one bit
(testb ($ 1) (%b v))
(jne @both-odd)
(shrq ($ 1) (% v))
(jmp @test-u)
@both-odd
(cmpq (% v) (% u))
(jb @v>u)
(subq (% v) (% u))
(shrq ($ 1) (% u))
(jmp @test)
@v>u
(subq (% u) (% v))
(shrq ($ 1) (% v))
@test-u
(testq (% u) (% u))
@test
(ja @loop)
(shlq (% cl) (% v))
(movb ($ 0) (% cl))
(box-fixnum v arg_z)
(single-value-return)))
(defx86lapfunction %mrg31k3p ((state arg_z))
(let ((seed temp0)
(m1 #x7fffffff)
(m2 #x7fffadb3)
(negative-m1 #x80000001)
(negative-m2 #x8000524d))
(svref state 1 seed)
(movl (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)) (% imm0.l))
(andl ($ #x1ff) (% imm0.l))
(shll ($ 22) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)) (% imm1.l))
(shrl ($ 9) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)) (% imm1.l))
(andl ($ #xffffff) (% imm1.l))
(shll ($ 7) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)) (% imm1.l))
(shrl ($ 24) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(leal (@ negative-m1 (% imm0.l)) (% imm1.l))
(cmpl ($ m1) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(addl (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)) (% imm0.l))
(leal (@ negative-m1 (% imm0.l)) (% imm1.l))
(cmpl ($ m1) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 2)) (% seed)))
(movl (@ (+ x8664::misc-data-offset (* 4 0)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 1)) (% seed)))
(movl (% imm0.l) (@ (+ x8664::misc-data-offset (* 4 0)) (% seed)))
second component
(movl (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)) (% imm0.l))
(andl ($ #xffff) (% imm0.l))
(shll ($ 15) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)) (% imm1.l))
(shrl ($ 16) (% imm1.l))
(imull ($ 21069) (% imm1.l) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)) (% imm0.l))
(andl ($ #xffff) (% imm0.l))
(shll ($ 15) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)) (% imm1.l))
(shrl ($ 16) (% imm1.l))
(imull ($ 21069) (% imm1.l) (% imm1.l))
(addl (% imm1.l) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(addl (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(leal (@ negative-m2 (% imm0.l)) (% imm1.l))
(cmpl ($ m2) (% imm0.l))
(cmovael (% imm1.l) (% imm0.l))
(movl (@ (+ x8664::misc-data-offset (* 4 4)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 5)) (% seed)))
(movl (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)) (% imm1.l))
(movl (% imm1.l) (@ (+ x8664::misc-data-offset (* 4 4)) (% seed)))
(movl (% imm0.l) (@ (+ x8664::misc-data-offset (* 4 3)) (% seed)))
(movl (@ (+ x8664::misc-data-offset (* 4 0)) (% seed)) (% imm1.l))
(rcmpl (% imm0.l) (% imm1.l))
(ja @ok)
(subl (% imm1.l) (% imm0.l))
(addl ($ m1) (% imm0.l))
(box-fixnum imm0 arg_z)
(single-value-return)
@ok
(subl (% imm1.l) (% imm0.l))
(box-fixnum imm0 arg_z)
(single-value-return)))
objects are viewed as having 32 - bit elements and are viewed
(defx86lapfunction %make-complex-double-float ((r arg_y) (i arg_z))
(movsd (@ target::misc-dfloat-offset (% r)) (% xmm0))
(movsd (@ target::misc-dfloat-offset (% i)) (% xmm1))
(unpcklpd (% xmm1) (% xmm0))
(movl ($ (logior (ash 6 x8664::num-subtag-bits) x8664::subtag-complex-double-float)) (%l imm0))
(movl ($ (- (* 3 16) x8664::fulltag-misc)) (%l imm1))
(subq (% imm1) (:rcontext x8664::tcr.save-allocptr))
(movq (:rcontext x8664::tcr.save-allocptr) (% allocptr))
(cmpq (:rcontext x8664::tcr.save-allocbase) (% allocptr))
(ja @no-trap)
(uuo-alloc)
@no-trap
(movq (% imm0) (@ x8664::misc-header-offset (% temp0)))
(andb ($ (lognot x8664::fulltagmask)) (:rcontext x8664::tcr.save-allocptr))
(movq (% allocptr) (% arg_z))
(movdqa (% xmm0) (@ x8664::complex-double-float.realpart (% arg_z)))
(single-value-return))
(defx86lapfunction %make-complex-single-float ((r arg_y) (i arg_z))
(movd (% r) (% xmm0))
(psrlq ($ 32) (% xmm0))
(movd (% i) (% xmm1))
(psrlq ($ 32) (% xmm1))
(unpcklps (% xmm1) (% xmm0))
(movl ($ (logior (ash 2 x8664::num-subtag-bits) x8664::subtag-complex-single-float)) (%l imm0))
(movl ($ (- (* 1 16) x8664::fulltag-misc)) (%l imm1))
(subq (% imm1) (:rcontext x8664::tcr.save-allocptr))
(movq (:rcontext x8664::tcr.save-allocptr) (% allocptr))
(cmpq (:rcontext x8664::tcr.save-allocbase) (% allocptr))
(ja @no-trap)
(uuo-alloc)
@no-trap
(movq (% imm0) (@ x8664::misc-header-offset (% temp0)))
(andb ($ (lognot x8664::fulltagmask)) (:rcontext x8664::tcr.save-allocptr))
(movq (% allocptr) (% arg_z))
(movq (% xmm0) (@ x8664::complex-single-float.realpart (% arg_z)))
(single-value-return))
Write the hex digits ( including leading zeros ) that represent the
enough . END - IDX is the position in the string where the rightmost
hex digit should go . END - IDX should be 14 for ( unsigned - byte 60 ) ,
and 7 for ( unsigned - byte 32 ) . This function assumes that the
(defx86lapfunction %ub-fixnum-hex-digits ((end-idx arg_x) (fixnum arg_y)
(string arg_z))
(let ((index imm1))
rightmost hex digit . Thus , for ( unsigned - byte 60 ) , index will
be 14 . For ( unsigned - byte 32 ) , it will be 7 .
(unbox-fixnum end-idx index)
(unbox-fixnum fixnum imm2)
@loop
(movl ($ #xf) (% imm0.l))
(jb @small)
extra for digits # \A through # \F
(addl ($ 7) (% imm0.l))
@small
48 is ( char - code # \0 )
(addl ($ 48) (% imm0.l))
(movl (% imm0.l) (@ x8664::misc-data-offset (% string) (% index) 4))
(subq ($ 1) (% index))
(jae @loop)
(single-value-return)))
|
e50bd93be34af9e142cdd61b30b8e13f55bdb5c5c6d2037872afcbe5ad196ac4 | bytekid/mkbtt | selectionLexer.ml | # 20 "selectionLexer.mll"
(* lexing selection strategies *)
open SelectionParser
# 7 "selectionLexer.ml"
let __ocaml_lex_tables = {
Lexing.lex_base =
"\000\000\233\255\234\255\235\255\236\255\237\255\238\255\239\255\
\241\255\243\255\244\255\245\255\246\255\247\255\000\000\000\000\
\000\000\000\000\000\000\001\000\002\000\000\000\001\000\000\000\
\255\255\004\000\254\255\253\255\009\000\252\255\251\255\005\000\
\005\000\250\255\249\255\016\000";
Lexing.lex_backtrk =
"\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\022\000\013\000\
\022\000\015\000\022\000\022\000\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\007\000";
Lexing.lex_default =
"\001\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\000\000\255\255\000\000\000\000\255\255\000\000\000\000\255\255\
\255\255\000\000\000\000\255\255";
Lexing.lex_trans =
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\003\000\000\000\000\000\012\000\000\000\000\000\000\000\000\000\
\007\000\006\000\009\000\011\000\005\000\010\000\035\000\000\000\
\014\000\014\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\004\000\000\000\000\000\000\000\000\000\013\000\
\035\000\035\000\035\000\035\000\035\000\035\000\035\000\035\000\
\035\000\035\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\031\000\023\000\017\000\016\000\015\000\033\000\000\000\
\000\000\028\000\000\000\000\000\034\000\018\000\022\000\027\000\
\030\000\026\000\008\000\019\000\021\000\025\000\020\000\029\000\
\024\000\032\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000";
Lexing.lex_check =
"\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\000\000\255\255\255\255\000\000\255\255\255\255\255\255\255\255\
\000\000\000\000\000\000\000\000\000\000\000\000\014\000\255\255\
\000\000\000\000\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\000\000\255\255\255\255\255\255\255\255\000\000\
\035\000\035\000\035\000\035\000\035\000\035\000\035\000\035\000\
\035\000\035\000\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\016\000\022\000\000\000\000\000\000\000\032\000\255\255\
\255\255\018\000\255\255\255\255\015\000\000\000\019\000\020\000\
\017\000\025\000\000\000\000\000\019\000\021\000\019\000\028\000\
\023\000\031\000\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\000\000\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255";
Lexing.lex_base_code =
"";
Lexing.lex_backtrk_code =
"";
Lexing.lex_default_code =
"";
Lexing.lex_trans_code =
"";
Lexing.lex_check_code =
"";
Lexing.lex_code =
"";
}
let rec token lexbuf =
__ocaml_lex_token_rec lexbuf 0
and __ocaml_lex_token_rec lexbuf __ocaml_lex_state =
match Lexing.engine __ocaml_lex_tables __ocaml_lex_state lexbuf with
| 0 ->
# 25 "selectionLexer.mll"
(SIZE_MAX)
# 120 "selectionLexer.ml"
| 1 ->
# 26 "selectionLexer.mll"
(SIZE_SUM)
# 125 "selectionLexer.ml"
| 2 ->
# 27 "selectionLexer.mll"
(SUM)
# 130 "selectionLexer.ml"
| 3 ->
# 28 "selectionLexer.mll"
(MIN)
# 135 "selectionLexer.ml"
| 4 ->
# 29 "selectionLexer.mll"
(CP)
# 140 "selectionLexer.ml"
| 5 ->
# 30 "selectionLexer.mll"
(DATA)
# 145 "selectionLexer.ml"
| 6 ->
# 31 "selectionLexer.mll"
(ELABEL)
# 150 "selectionLexer.ml"
| 7 ->
# 32 "selectionLexer.mll"
(FLOAT(Lexing.lexeme lexbuf))
# 155 "selectionLexer.ml"
| 8 ->
# 33 "selectionLexer.mll"
(RANDOM)
# 160 "selectionLexer.ml"
| 9 ->
# 34 "selectionLexer.mll"
(COUNT)
# 165 "selectionLexer.ml"
| 10 ->
# 35 "selectionLexer.mll"
(PLUS)
# 170 "selectionLexer.ml"
| 11 ->
# 36 "selectionLexer.mll"
(MINUS)
# 175 "selectionLexer.ml"
| 12 ->
# 37 "selectionLexer.mll"
(TIMESTAMP)
# 180 "selectionLexer.ml"
| 13 ->
# 38 "selectionLexer.mll"
(E)
# 185 "selectionLexer.ml"
| 14 ->
# 39 "selectionLexer.mll"
(R)
# 190 "selectionLexer.ml"
| 15 ->
# 40 "selectionLexer.mll"
(C)
# 195 "selectionLexer.ml"
| 16 ->
# 41 "selectionLexer.mll"
(LPAREN)
# 200 "selectionLexer.ml"
| 17 ->
# 42 "selectionLexer.mll"
(RPAREN)
# 205 "selectionLexer.ml"
| 18 ->
# 43 "selectionLexer.mll"
(COMMA)
# 210 "selectionLexer.ml"
| 19 ->
# 44 "selectionLexer.mll"
(COLON)
# 215 "selectionLexer.ml"
| 20 ->
# 45 "selectionLexer.mll"
(token lexbuf)
# 220 "selectionLexer.ml"
| 21 ->
# 46 "selectionLexer.mll"
(EOF)
# 225 "selectionLexer.ml"
| 22 ->
# 47 "selectionLexer.mll"
(token lexbuf)
# 230 "selectionLexer.ml"
| __ocaml_lex_state -> lexbuf.Lexing.refill_buff lexbuf; __ocaml_lex_token_rec lexbuf __ocaml_lex_state
;;
| null | https://raw.githubusercontent.com/bytekid/mkbtt/c2f8e0615389b52eabd12655fe48237aa0fe83fd/src/mascott/src/selectionLexer.ml | ocaml | lexing selection strategies | # 20 "selectionLexer.mll"
open SelectionParser
# 7 "selectionLexer.ml"
let __ocaml_lex_tables = {
Lexing.lex_base =
"\000\000\233\255\234\255\235\255\236\255\237\255\238\255\239\255\
\241\255\243\255\244\255\245\255\246\255\247\255\000\000\000\000\
\000\000\000\000\000\000\001\000\002\000\000\000\001\000\000\000\
\255\255\004\000\254\255\253\255\009\000\252\255\251\255\005\000\
\005\000\250\255\249\255\016\000";
Lexing.lex_backtrk =
"\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\022\000\013\000\
\022\000\015\000\022\000\022\000\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\007\000";
Lexing.lex_default =
"\001\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\000\000\255\255\000\000\000\000\255\255\000\000\000\000\255\255\
\255\255\000\000\000\000\255\255";
Lexing.lex_trans =
"\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\003\000\000\000\000\000\012\000\000\000\000\000\000\000\000\000\
\007\000\006\000\009\000\011\000\005\000\010\000\035\000\000\000\
\014\000\014\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\004\000\000\000\000\000\000\000\000\000\013\000\
\035\000\035\000\035\000\035\000\035\000\035\000\035\000\035\000\
\035\000\035\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\031\000\023\000\017\000\016\000\015\000\033\000\000\000\
\000\000\028\000\000\000\000\000\034\000\018\000\022\000\027\000\
\030\000\026\000\008\000\019\000\021\000\025\000\020\000\029\000\
\024\000\032\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\002\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\000\
\000\000";
Lexing.lex_check =
"\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\000\000\255\255\255\255\000\000\255\255\255\255\255\255\255\255\
\000\000\000\000\000\000\000\000\000\000\000\000\014\000\255\255\
\000\000\000\000\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\000\000\255\255\255\255\255\255\255\255\000\000\
\035\000\035\000\035\000\035\000\035\000\035\000\035\000\035\000\
\035\000\035\000\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\016\000\022\000\000\000\000\000\000\000\032\000\255\255\
\255\255\018\000\255\255\255\255\015\000\000\000\019\000\020\000\
\017\000\025\000\000\000\000\000\019\000\021\000\019\000\028\000\
\023\000\031\000\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\000\000\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\255\
\255\255";
Lexing.lex_base_code =
"";
Lexing.lex_backtrk_code =
"";
Lexing.lex_default_code =
"";
Lexing.lex_trans_code =
"";
Lexing.lex_check_code =
"";
Lexing.lex_code =
"";
}
let rec token lexbuf =
__ocaml_lex_token_rec lexbuf 0
and __ocaml_lex_token_rec lexbuf __ocaml_lex_state =
match Lexing.engine __ocaml_lex_tables __ocaml_lex_state lexbuf with
| 0 ->
# 25 "selectionLexer.mll"
(SIZE_MAX)
# 120 "selectionLexer.ml"
| 1 ->
# 26 "selectionLexer.mll"
(SIZE_SUM)
# 125 "selectionLexer.ml"
| 2 ->
# 27 "selectionLexer.mll"
(SUM)
# 130 "selectionLexer.ml"
| 3 ->
# 28 "selectionLexer.mll"
(MIN)
# 135 "selectionLexer.ml"
| 4 ->
# 29 "selectionLexer.mll"
(CP)
# 140 "selectionLexer.ml"
| 5 ->
# 30 "selectionLexer.mll"
(DATA)
# 145 "selectionLexer.ml"
| 6 ->
# 31 "selectionLexer.mll"
(ELABEL)
# 150 "selectionLexer.ml"
| 7 ->
# 32 "selectionLexer.mll"
(FLOAT(Lexing.lexeme lexbuf))
# 155 "selectionLexer.ml"
| 8 ->
# 33 "selectionLexer.mll"
(RANDOM)
# 160 "selectionLexer.ml"
| 9 ->
# 34 "selectionLexer.mll"
(COUNT)
# 165 "selectionLexer.ml"
| 10 ->
# 35 "selectionLexer.mll"
(PLUS)
# 170 "selectionLexer.ml"
| 11 ->
# 36 "selectionLexer.mll"
(MINUS)
# 175 "selectionLexer.ml"
| 12 ->
# 37 "selectionLexer.mll"
(TIMESTAMP)
# 180 "selectionLexer.ml"
| 13 ->
# 38 "selectionLexer.mll"
(E)
# 185 "selectionLexer.ml"
| 14 ->
# 39 "selectionLexer.mll"
(R)
# 190 "selectionLexer.ml"
| 15 ->
# 40 "selectionLexer.mll"
(C)
# 195 "selectionLexer.ml"
| 16 ->
# 41 "selectionLexer.mll"
(LPAREN)
# 200 "selectionLexer.ml"
| 17 ->
# 42 "selectionLexer.mll"
(RPAREN)
# 205 "selectionLexer.ml"
| 18 ->
# 43 "selectionLexer.mll"
(COMMA)
# 210 "selectionLexer.ml"
| 19 ->
# 44 "selectionLexer.mll"
(COLON)
# 215 "selectionLexer.ml"
| 20 ->
# 45 "selectionLexer.mll"
(token lexbuf)
# 220 "selectionLexer.ml"
| 21 ->
# 46 "selectionLexer.mll"
(EOF)
# 225 "selectionLexer.ml"
| 22 ->
# 47 "selectionLexer.mll"
(token lexbuf)
# 230 "selectionLexer.ml"
| __ocaml_lex_state -> lexbuf.Lexing.refill_buff lexbuf; __ocaml_lex_token_rec lexbuf __ocaml_lex_state
;;
|
5e0668d4201e91c0fdcb0a161b76c3d6a7680daedb723f8aa22916847ce95317 | YoshikuniJujo/funpaala | useSome.hs | import Some
d :: Int
d = a * b
| null | https://raw.githubusercontent.com/YoshikuniJujo/funpaala/5366130826da0e6b1180992dfff94c4a634cda99/samples/24_adt_module/useSome.hs | haskell | import Some
d :: Int
d = a * b
|
|
6763ef0e06042b76e3714b37bc6b78bb88efbdebb25f8f2068fb8541e2362526 | ndmitchell/hlint | Brackets.hs | # LANGUAGE MultiParamTypeClasses , FlexibleInstances , FlexibleContexts #
# OPTIONS_GHC -Wno - incomplete - patterns -Wno - overlapping - patterns #
module GHC.Util.Brackets (Brackets(..), isApp,isOpApp,isAnyApp) where
import GHC.Hs
import GHC.Types.SrcLoc
import GHC.Types.SourceText
import Language.Haskell.GhclibParserEx.GHC.Hs.Expr
import Refact.Types
class Brackets a where
Remove one paren or nothing if there is no paren .
addParen :: a -> a -- Write out a paren.
-- | Is this item lexically requiring no bracketing ever i.e. is
-- totally atomic.
isAtom :: a -> Bool
-- | Is the child safe free from brackets in the parent
-- position. Err on the side of caution, True = don't know.
needBracket :: Int -> a -> a -> Bool
findType :: a -> RType
instance Brackets (LocatedA (HsExpr GhcPs)) where
When GHC parses a section in concrete syntax , it will produce an
-- 'HsPar (Section[L|R])'. There is no concrete syntax that will
-- result in a "naked" section. Consequently, given an expression,
when stripping brackets ( c.f . ' Hint . Brackets ) , do n't remove the
paren 's surrounding a section - they are required .
remParen (L _ (HsPar _ _ (L _ SectionL{}) _)) = Nothing
remParen (L _ (HsPar _ _ (L _ SectionR{}) _)) = Nothing
remParen (L _ (HsPar _ _ x _)) = Just x
remParen _ = Nothing
addParen = nlHsPar
isAtom (L _ x) = case x of
HsVar{} -> True
HsUnboundVar{} -> True
-- Technically atomic, but lots of people think it shouldn't be
HsRecSel{} -> False
HsOverLabel{} -> True
HsIPVar{} -> True
-- Note that sections aren't atoms (but parenthesized sections are).
HsPar{} -> True
ExplicitTuple{} -> True
ExplicitSum{} -> True
ExplicitList{} -> True
RecordCon{} -> True
RecordUpd{} -> True
ArithSeq{}-> True
HsTypedBracket{} -> True
HsUntypedBracket{} -> True
HsSplice might be $ foo , where @($foo ) would require brackets ,
but in that case the $ foo is a type , so we can still mark Splice as atomic
HsSpliceE{} -> True
HsOverLit _ x | not $ isNegativeOverLit x -> True
HsLit _ x | not $ isNegativeLit x -> True
_ -> False
where
isNegativeLit (HsInt _ i) = il_neg i
isNegativeLit (HsRat _ f _) = fl_neg f
isNegativeLit (HsFloatPrim _ f) = fl_neg f
isNegativeLit (HsDoublePrim _ f) = fl_neg f
isNegativeLit (HsIntPrim _ x) = x < 0
isNegativeLit (HsInt64Prim _ x) = x < 0
isNegativeLit (HsInteger _ x _) = x < 0
isNegativeLit _ = False
isNegativeOverLit OverLit {ol_val=HsIntegral i} = il_neg i
isNegativeOverLit OverLit {ol_val=HsFractional f} = fl_neg f
isNegativeOverLit _ = False
isAtom _ = False -- '{-# COMPLETE L #-}'
needBracket i parent child -- Note: i is the index in children, not in the AST.
| isAtom child = False
| isSection parent, L _ HsApp{} <- child = False
| L _ OpApp{} <- parent, L _ HsApp{} <- child, i /= 0 || isAtomOrApp child = False
| L _ ExplicitList{} <- parent = False
| L _ ExplicitTuple{} <- parent = False
| L _ HsIf{} <- parent, isAnyApp child = False
| L _ HsApp{} <- parent, i == 0, L _ HsApp{} <- child = False
| L _ ExprWithTySig{} <- parent, i == 0, isApp child = False
| L _ RecordCon{} <- parent = False
| L _ RecordUpd{} <- parent, i /= 0 = False
-- These all have view patterns embedded within them, or are naturally followed by ->, so we have to watch out for
@(x::y ) - > z@ which is valid , as either a type annotation , or a view pattern .
| L _ HsLet{} <- parent, isApp child = False
| L _ HsDo{} <- parent, isAnyApp child = False
| L _ HsLam{} <- parent, isAnyApp child = False
| L _ HsCase{} <- parent, isAnyApp child = False
| L _ HsPar{} <- parent = False
| otherwise = True
findType _ = Expr
| Am I an HsApp such that having me in an infix does n't require brackets .
Before BlockArguments that was _ all _ HsApps . Now , imagine :
--
-- (f \x -> x) *> ...
-- (f do x) *> ...
isAtomOrApp :: LocatedA (HsExpr GhcPs) -> Bool
isAtomOrApp x | isAtom x = True
isAtomOrApp (L _ (HsApp _ _ x)) = isAtomOrApp x
isAtomOrApp _ = False
instance Brackets (LocatedA (Pat GhcPs)) where
remParen (L _ (ParPat _ _ x _)) = Just x
remParen _ = Nothing
addParen = nlParPat
isAtom (L _ x) = case x of
ParPat{} -> True
TuplePat{} -> True
ListPat{} -> True
-- This is technically atomic, but lots of people think it shouldn't be
ConPat _ _ RecCon{} -> False
ConPat _ _ (PrefixCon _ []) -> True
VarPat{} -> True
WildPat{} -> True
SumPat{} -> True
AsPat{} -> True
SplicePat{} -> True
LitPat _ x | not $ isSignedLit x -> True
_ -> False
where
isSignedLit HsInt{} = True
isSignedLit HsIntPrim{} = True
isSignedLit HsInt64Prim{} = True
isSignedLit HsInteger{} = True
isSignedLit HsRat{} = True
isSignedLit HsFloatPrim{} = True
isSignedLit HsDoublePrim{} = True
isSignedLit _ = False
isAtom _ = False -- '{-# COMPLETE L #-}'
needBracket _ parent child
| isAtom child = False
| L _ TuplePat{} <- parent = False
| L _ ListPat{} <- parent = False
| otherwise = True
findType _ = Pattern
instance Brackets (LocatedA (HsType GhcPs)) where
remParen (L _ (HsParTy _ x)) = Just x
remParen _ = Nothing
addParen e = noLocA $ HsParTy EpAnnNotUsed e
isAtom (L _ x) = case x of
HsParTy{} -> True
HsTupleTy{} -> True
HsListTy{} -> True
HsExplicitTupleTy{} -> True
HsExplicitListTy{} -> True
HsTyVar{} -> True
HsSumTy{} -> True
HsWildCardTy{} -> True
-- HsSpliceTy{} is not atomic, because of @($foo)
_ -> False
isAtom _ = False -- '{-# COMPLETE L #-}'
needBracket _ parent child
| isAtom child = False
-- a -> (b -> c) is not a required bracket, but useful for documentation about arity etc.
| TyFun { } < - parent , i = = 1 , TyFun { } < - child = False
| L _ HsFunTy{} <- parent, L _ HsAppTy{} <- child = False
| L _ HsTupleTy{} <- parent = False
| L _ HsListTy{} <- parent = False
| L _ HsExplicitTupleTy{} <- parent = False
| L _ HsListTy{} <- parent = False
| L _ HsExplicitListTy{} <- parent = False
| L _ HsOpTy{} <- parent, L _ HsAppTy{} <- child = False
| L _ HsParTy{} <- parent = False
| otherwise = True
findType _ = Type
| null | https://raw.githubusercontent.com/ndmitchell/hlint/d06148be027179469333a892294fb7a90b9ea51c/src/GHC/Util/Brackets.hs | haskell | Write out a paren.
| Is this item lexically requiring no bracketing ever i.e. is
totally atomic.
| Is the child safe free from brackets in the parent
position. Err on the side of caution, True = don't know.
'HsPar (Section[L|R])'. There is no concrete syntax that will
result in a "naked" section. Consequently, given an expression,
Technically atomic, but lots of people think it shouldn't be
Note that sections aren't atoms (but parenthesized sections are).
'{-# COMPLETE L #-}'
Note: i is the index in children, not in the AST.
These all have view patterns embedded within them, or are naturally followed by ->, so we have to watch out for
(f \x -> x) *> ...
(f do x) *> ...
This is technically atomic, but lots of people think it shouldn't be
'{-# COMPLETE L #-}'
HsSpliceTy{} is not atomic, because of @($foo)
'{-# COMPLETE L #-}'
a -> (b -> c) is not a required bracket, but useful for documentation about arity etc. | # LANGUAGE MultiParamTypeClasses , FlexibleInstances , FlexibleContexts #
# OPTIONS_GHC -Wno - incomplete - patterns -Wno - overlapping - patterns #
module GHC.Util.Brackets (Brackets(..), isApp,isOpApp,isAnyApp) where
import GHC.Hs
import GHC.Types.SrcLoc
import GHC.Types.SourceText
import Language.Haskell.GhclibParserEx.GHC.Hs.Expr
import Refact.Types
class Brackets a where
Remove one paren or nothing if there is no paren .
isAtom :: a -> Bool
needBracket :: Int -> a -> a -> Bool
findType :: a -> RType
instance Brackets (LocatedA (HsExpr GhcPs)) where
When GHC parses a section in concrete syntax , it will produce an
when stripping brackets ( c.f . ' Hint . Brackets ) , do n't remove the
paren 's surrounding a section - they are required .
remParen (L _ (HsPar _ _ (L _ SectionL{}) _)) = Nothing
remParen (L _ (HsPar _ _ (L _ SectionR{}) _)) = Nothing
remParen (L _ (HsPar _ _ x _)) = Just x
remParen _ = Nothing
addParen = nlHsPar
isAtom (L _ x) = case x of
HsVar{} -> True
HsUnboundVar{} -> True
HsRecSel{} -> False
HsOverLabel{} -> True
HsIPVar{} -> True
HsPar{} -> True
ExplicitTuple{} -> True
ExplicitSum{} -> True
ExplicitList{} -> True
RecordCon{} -> True
RecordUpd{} -> True
ArithSeq{}-> True
HsTypedBracket{} -> True
HsUntypedBracket{} -> True
HsSplice might be $ foo , where @($foo ) would require brackets ,
but in that case the $ foo is a type , so we can still mark Splice as atomic
HsSpliceE{} -> True
HsOverLit _ x | not $ isNegativeOverLit x -> True
HsLit _ x | not $ isNegativeLit x -> True
_ -> False
where
isNegativeLit (HsInt _ i) = il_neg i
isNegativeLit (HsRat _ f _) = fl_neg f
isNegativeLit (HsFloatPrim _ f) = fl_neg f
isNegativeLit (HsDoublePrim _ f) = fl_neg f
isNegativeLit (HsIntPrim _ x) = x < 0
isNegativeLit (HsInt64Prim _ x) = x < 0
isNegativeLit (HsInteger _ x _) = x < 0
isNegativeLit _ = False
isNegativeOverLit OverLit {ol_val=HsIntegral i} = il_neg i
isNegativeOverLit OverLit {ol_val=HsFractional f} = fl_neg f
isNegativeOverLit _ = False
| isAtom child = False
| isSection parent, L _ HsApp{} <- child = False
| L _ OpApp{} <- parent, L _ HsApp{} <- child, i /= 0 || isAtomOrApp child = False
| L _ ExplicitList{} <- parent = False
| L _ ExplicitTuple{} <- parent = False
| L _ HsIf{} <- parent, isAnyApp child = False
| L _ HsApp{} <- parent, i == 0, L _ HsApp{} <- child = False
| L _ ExprWithTySig{} <- parent, i == 0, isApp child = False
| L _ RecordCon{} <- parent = False
| L _ RecordUpd{} <- parent, i /= 0 = False
@(x::y ) - > z@ which is valid , as either a type annotation , or a view pattern .
| L _ HsLet{} <- parent, isApp child = False
| L _ HsDo{} <- parent, isAnyApp child = False
| L _ HsLam{} <- parent, isAnyApp child = False
| L _ HsCase{} <- parent, isAnyApp child = False
| L _ HsPar{} <- parent = False
| otherwise = True
findType _ = Expr
| Am I an HsApp such that having me in an infix does n't require brackets .
Before BlockArguments that was _ all _ HsApps . Now , imagine :
isAtomOrApp :: LocatedA (HsExpr GhcPs) -> Bool
isAtomOrApp x | isAtom x = True
isAtomOrApp (L _ (HsApp _ _ x)) = isAtomOrApp x
isAtomOrApp _ = False
instance Brackets (LocatedA (Pat GhcPs)) where
remParen (L _ (ParPat _ _ x _)) = Just x
remParen _ = Nothing
addParen = nlParPat
isAtom (L _ x) = case x of
ParPat{} -> True
TuplePat{} -> True
ListPat{} -> True
ConPat _ _ RecCon{} -> False
ConPat _ _ (PrefixCon _ []) -> True
VarPat{} -> True
WildPat{} -> True
SumPat{} -> True
AsPat{} -> True
SplicePat{} -> True
LitPat _ x | not $ isSignedLit x -> True
_ -> False
where
isSignedLit HsInt{} = True
isSignedLit HsIntPrim{} = True
isSignedLit HsInt64Prim{} = True
isSignedLit HsInteger{} = True
isSignedLit HsRat{} = True
isSignedLit HsFloatPrim{} = True
isSignedLit HsDoublePrim{} = True
isSignedLit _ = False
needBracket _ parent child
| isAtom child = False
| L _ TuplePat{} <- parent = False
| L _ ListPat{} <- parent = False
| otherwise = True
findType _ = Pattern
instance Brackets (LocatedA (HsType GhcPs)) where
remParen (L _ (HsParTy _ x)) = Just x
remParen _ = Nothing
addParen e = noLocA $ HsParTy EpAnnNotUsed e
isAtom (L _ x) = case x of
HsParTy{} -> True
HsTupleTy{} -> True
HsListTy{} -> True
HsExplicitTupleTy{} -> True
HsExplicitListTy{} -> True
HsTyVar{} -> True
HsSumTy{} -> True
HsWildCardTy{} -> True
_ -> False
needBracket _ parent child
| isAtom child = False
| TyFun { } < - parent , i = = 1 , TyFun { } < - child = False
| L _ HsFunTy{} <- parent, L _ HsAppTy{} <- child = False
| L _ HsTupleTy{} <- parent = False
| L _ HsListTy{} <- parent = False
| L _ HsExplicitTupleTy{} <- parent = False
| L _ HsListTy{} <- parent = False
| L _ HsExplicitListTy{} <- parent = False
| L _ HsOpTy{} <- parent, L _ HsAppTy{} <- child = False
| L _ HsParTy{} <- parent = False
| otherwise = True
findType _ = Type
|
f564d08b2dc83049e5bc9777be26252bda59f9150aeeebef3de7e63b2bd78ff8 | c4-project/c4f | atomic_cmpxchg.mli | This file is part of c4f .
Copyright ( c ) 2018 - 2022 C4 Project
c4 t itself is licensed under the MIT License . See the LICENSE file in the
project root for more information .
Parts of c4 t are based on code from the Herdtools7 project
( ) : see the LICENSE.herd file in the
project root for more information .
Copyright (c) 2018-2022 C4 Project
c4t itself is licensed under the MIT License. See the LICENSE file in the
project root for more information.
Parts of c4t are based on code from the Herdtools7 project
() : see the LICENSE.herd file in the
project root for more information. *)
open Base
open Import
(** Atomic compare-exchange operations.
Atomic compare-exchanges can appear in statement position (where the
boolean output is ignored) or expression position (where it isn't).
Because of the possibility of them being in expressions, most of this
module is parametric on an expression type to avoid cycles. *)
* { 1 Strength of compare - exchanges }
As usual with C11 , there are two types of compare - exchange : weak , which
is allowed to fail spuriously , and strong , which is n't .
As usual with C11, there are two types of compare-exchange: weak, which
is allowed to fail spuriously, and strong, which isn't. *)
module Strength : sig
(** Enumeration of compare-exchange strengths. *)
type t =
| Strong (** Strong compare-exchanges. *)
| Weak (** Weak compare-exchanges. *)
include Utils.Enum_types.Extension_table with type t := t
end
* { 1 Compare - exchanges }
* Type of compare - exchanges , parametrised on expressions.contents
Compare - exchanges can be quickchecked , given appropriate expression
endpoints . While this quickcheck generates appropriate memory orders , it
will use random addresses and strengths , and is unsuitable for
type - safe / environment - safe generation .
Compare-exchanges can be quickchecked, given appropriate expression
endpoints. While this quickcheck generates appropriate memory orders, it
will use random addresses and strengths, and is unsuitable for
type-safe/environment-safe generation. *)
type 'e t =
{ obj: Address.t
; expected: Address.t
; desired: 'e
; strength: Strength.t
; succ: Mem_order.t
; fail: Mem_order.t }
[@@deriving sexp, accessors, compare, equal, quickcheck]
* { 2 Traversal primitives }
(** Primitive building block for producing traversals over atomic cmpxchg.
For module recursion reasons, we don't provide actual traversals here;
see {!Expression}. *)
module Base_map (Ap : Applicative.S) : sig
val bmap :
'a t
-> obj:(Address.t -> Address.t Ap.t)
-> expected:(Address.t -> Address.t Ap.t)
-> desired:('a -> 'b Ap.t)
-> strength:(Strength.t -> Strength.t Ap.t)
-> succ:(Mem_order.t -> Mem_order.t Ap.t)
-> fail:(Mem_order.t -> Mem_order.t Ap.t)
-> 'b t Ap.t
* [ bmap x ~obj ~expected ~desired ~strength ~succ ~fail ] applies the
respective applicative computations to each part of [ x ] .
respective applicative computations to each part of [x]. *)
end
(** {1 Interface implementations} *)
include Expression_types.S_atomic with type 'e t := 'e t
| null | https://raw.githubusercontent.com/c4-project/c4f/8939477732861789abc807c8c1532a302b2848a5/lib/fir/src/atomic_cmpxchg.mli | ocaml | * Atomic compare-exchange operations.
Atomic compare-exchanges can appear in statement position (where the
boolean output is ignored) or expression position (where it isn't).
Because of the possibility of them being in expressions, most of this
module is parametric on an expression type to avoid cycles.
* Enumeration of compare-exchange strengths.
* Strong compare-exchanges.
* Weak compare-exchanges.
* Primitive building block for producing traversals over atomic cmpxchg.
For module recursion reasons, we don't provide actual traversals here;
see {!Expression}.
* {1 Interface implementations} | This file is part of c4f .
Copyright ( c ) 2018 - 2022 C4 Project
c4 t itself is licensed under the MIT License . See the LICENSE file in the
project root for more information .
Parts of c4 t are based on code from the Herdtools7 project
( ) : see the LICENSE.herd file in the
project root for more information .
Copyright (c) 2018-2022 C4 Project
c4t itself is licensed under the MIT License. See the LICENSE file in the
project root for more information.
Parts of c4t are based on code from the Herdtools7 project
() : see the LICENSE.herd file in the
project root for more information. *)
open Base
open Import
* { 1 Strength of compare - exchanges }
As usual with C11 , there are two types of compare - exchange : weak , which
is allowed to fail spuriously , and strong , which is n't .
As usual with C11, there are two types of compare-exchange: weak, which
is allowed to fail spuriously, and strong, which isn't. *)
module Strength : sig
type t =
include Utils.Enum_types.Extension_table with type t := t
end
* { 1 Compare - exchanges }
* Type of compare - exchanges , parametrised on expressions.contents
Compare - exchanges can be quickchecked , given appropriate expression
endpoints . While this quickcheck generates appropriate memory orders , it
will use random addresses and strengths , and is unsuitable for
type - safe / environment - safe generation .
Compare-exchanges can be quickchecked, given appropriate expression
endpoints. While this quickcheck generates appropriate memory orders, it
will use random addresses and strengths, and is unsuitable for
type-safe/environment-safe generation. *)
type 'e t =
{ obj: Address.t
; expected: Address.t
; desired: 'e
; strength: Strength.t
; succ: Mem_order.t
; fail: Mem_order.t }
[@@deriving sexp, accessors, compare, equal, quickcheck]
* { 2 Traversal primitives }
module Base_map (Ap : Applicative.S) : sig
val bmap :
'a t
-> obj:(Address.t -> Address.t Ap.t)
-> expected:(Address.t -> Address.t Ap.t)
-> desired:('a -> 'b Ap.t)
-> strength:(Strength.t -> Strength.t Ap.t)
-> succ:(Mem_order.t -> Mem_order.t Ap.t)
-> fail:(Mem_order.t -> Mem_order.t Ap.t)
-> 'b t Ap.t
* [ bmap x ~obj ~expected ~desired ~strength ~succ ~fail ] applies the
respective applicative computations to each part of [ x ] .
respective applicative computations to each part of [x]. *)
end
include Expression_types.S_atomic with type 'e t := 'e t
|
eeb98cbd463fbd0cc6053dced7b1794efb2c47320a0d7297860ecce1f59ed182 | chshersh/iris | Mode.hs | module Test.Iris.Colour.Mode (modeSpec) where
import Data.Foldable (for_)
import System.Environment (setEnv, unsetEnv)
import System.IO (stderr, stdout)
import Test.Hspec (Spec, before_, describe, it, shouldReturn)
import Iris.Cli.Colour (ColourOption (..))
import Iris.Colour.Mode (ColourMode (..), detectColourMode)
import Test.Iris.Common (checkCI)
modeSpec :: Spec
modeSpec = before_ clearAppEnv $ describe "Mode" $ do
let detectStdoutColour option = detectColourMode stdout option (Just "myapp")
let detectStderrColour option = detectColourMode stderr option (Just "myapp")
it "DisableColour when --no-colour" $ do
detectStdoutColour Never `shouldReturn` DisableColour
detectStderrColour Never `shouldReturn` DisableColour
it "EnableColour when --colour" $ do
detectStdoutColour Always `shouldReturn` EnableColour
detectStderrColour Always `shouldReturn` EnableColour
it "EnableColour in clear environment" $ do
ciColour <- colourWithCI
detectStdoutColour Auto `shouldReturn` ciColour
detectStderrColour Auto `shouldReturn` ciColour
it "DisableColour when NO_COLOR is set" $ do
setEnv "NO_COLOR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when NO_COLOUR is set" $ do
setEnv "NO_COLOUR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when MYAPP_NO_COLOR is set" $ do
setEnv "MYAPP_NO_COLOR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when MYAPP_NO_COLOUR is set" $ do
setEnv "MYAPP_NO_COLOUR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when TERM=dumb" $ do
setEnv "TERM" "dumb"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "EnableColour when TERM=xterm-256color" $ do
setEnv "TERM" "xterm-256color"
ciColour <- colourWithCI
detectStdoutColour Auto `shouldReturn` ciColour
detectStderrColour Auto `shouldReturn` ciColour
it "DisableColour when CI is set" $ do
ciColour <- colourWithCI
detectStdoutColour Auto `shouldReturn` ciColour
detectStderrColour Auto `shouldReturn` ciColour
-- Helper functions
testEnvVars :: [String]
testEnvVars =
[ "NO_COLOR"
, "NO_COLOUR"
, "MYAPP_NO_COLOR"
, "MYAPP_NO_COLOUR"
, "TERM"
]
clearAppEnv :: IO ()
clearAppEnv = for_ testEnvVars unsetEnv
colourWithCI :: IO ColourMode
colourWithCI = do
isCi <- checkCI
pure $ if isCi then DisableColour else EnableColour
| null | https://raw.githubusercontent.com/chshersh/iris/8d5627284ac6b6814c6e026d709fde9ac07656a8/test/Test/Iris/Colour/Mode.hs | haskell | Helper functions | module Test.Iris.Colour.Mode (modeSpec) where
import Data.Foldable (for_)
import System.Environment (setEnv, unsetEnv)
import System.IO (stderr, stdout)
import Test.Hspec (Spec, before_, describe, it, shouldReturn)
import Iris.Cli.Colour (ColourOption (..))
import Iris.Colour.Mode (ColourMode (..), detectColourMode)
import Test.Iris.Common (checkCI)
modeSpec :: Spec
modeSpec = before_ clearAppEnv $ describe "Mode" $ do
let detectStdoutColour option = detectColourMode stdout option (Just "myapp")
let detectStderrColour option = detectColourMode stderr option (Just "myapp")
it "DisableColour when --no-colour" $ do
detectStdoutColour Never `shouldReturn` DisableColour
detectStderrColour Never `shouldReturn` DisableColour
it "EnableColour when --colour" $ do
detectStdoutColour Always `shouldReturn` EnableColour
detectStderrColour Always `shouldReturn` EnableColour
it "EnableColour in clear environment" $ do
ciColour <- colourWithCI
detectStdoutColour Auto `shouldReturn` ciColour
detectStderrColour Auto `shouldReturn` ciColour
it "DisableColour when NO_COLOR is set" $ do
setEnv "NO_COLOR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when NO_COLOUR is set" $ do
setEnv "NO_COLOUR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when MYAPP_NO_COLOR is set" $ do
setEnv "MYAPP_NO_COLOR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when MYAPP_NO_COLOUR is set" $ do
setEnv "MYAPP_NO_COLOUR" "1"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "DisableColour when TERM=dumb" $ do
setEnv "TERM" "dumb"
detectStdoutColour Auto `shouldReturn` DisableColour
detectStderrColour Auto `shouldReturn` DisableColour
it "EnableColour when TERM=xterm-256color" $ do
setEnv "TERM" "xterm-256color"
ciColour <- colourWithCI
detectStdoutColour Auto `shouldReturn` ciColour
detectStderrColour Auto `shouldReturn` ciColour
it "DisableColour when CI is set" $ do
ciColour <- colourWithCI
detectStdoutColour Auto `shouldReturn` ciColour
detectStderrColour Auto `shouldReturn` ciColour
testEnvVars :: [String]
testEnvVars =
[ "NO_COLOR"
, "NO_COLOUR"
, "MYAPP_NO_COLOR"
, "MYAPP_NO_COLOUR"
, "TERM"
]
clearAppEnv :: IO ()
clearAppEnv = for_ testEnvVars unsetEnv
colourWithCI :: IO ColourMode
colourWithCI = do
isCi <- checkCI
pure $ if isCi then DisableColour else EnableColour
|
363383168179953cbbee4df0e321c44b2daa72385d45b40364cadbbfd7d57d6c | fieldstrength/aeson-deriving | Main.hs | {-# Language DerivingVia #-}
{-# Language DataKinds #-}
{-# Language TemplateHaskell #-}
{-# Language DeriveAnyClass #-}
{-# Language DuplicateRecordFields #-}
module Main where
import Data.Aeson
import Data.Aeson.Deriving
import Data.Foldable (for_)
import Data.Aeson.Deriving.Text.Unsafe
import Data.Text
import GHC.Generics
import Hedgehog
import Hedgehog.Main (defaultMain)
main :: IO ()
main = defaultMain [checkParallel $$(discover)]
type IdiomaticEncoded =
GenericEncoded '[FieldLabelModifier := '[SnakeCase, DropLowercasePrefix]]
data Dog = Dog
{ dogAgeInDogYears :: Int
, dogName :: String
}
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via IdiomaticEncoded Dog
once :: Property -> Property
once = withTests 1
prop_fido_encodes_as_expected :: Property
prop_fido_encodes_as_expected = once . property $
encode (Dog 9 "fido") === "{\"name\":\"fido\",\"age_in_dog_years\":9}"
prop_fido_decodes_as_expected :: Property
prop_fido_decodes_as_expected = once . property $
tripping (Dog 9 "fido") encode eitherDecode
type UppercaseTypeTagEncoded =
GenericEncoded
'[ FieldLabelModifier := '[SnakeCase, DropLowercasePrefix]
, SumEncoding := TaggedObject "type" "contents"
, TagSingleConstructors := 'True
, ConstructorTagModifier := '[Uppercase, SnakeCase]
]
data PostArticle = PostArticle
{ articleName :: String
, articleText :: String
}
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via UppercaseTypeTagEncoded PostArticle
data DeleteArticle = DeleteArticle
{ articleId :: Int
}
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via UppercaseTypeTagEncoded DeleteArticle
data ArticleCommand
= MkPostArticle PostArticle
| MkDeleteArticle DeleteArticle
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via RecordSumEncoded "type" '[Uppercase, SnakeCase] ArticleCommand
prop_record_sum_encodes_as_expected :: Property
prop_record_sum_encodes_as_expected = once . property $
encode (MkDeleteArticle $ DeleteArticle 9)
=== "{\"id\":9,\"type\":\"DELETE_ARTICLE\"}"
prop_record_sum_decodes_as_expected :: Property
prop_record_sum_decodes_as_expected = once . property $
tripping (MkDeleteArticle $ DeleteArticle 9) encode decode
data MyVal
instance KnownJSON MyVal where jsonVal _ = Number 1
data X = X {xval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
X
& GenericEncoded '[]
& WithConstantFields
'["bar" := "baaz", "quux" := MyVal, "arr" := ["Hilbert","Dirac"]]
prop_WithConstantFields_extra_fields_encode_as_expected :: Property
prop_WithConstantFields_extra_fields_encode_as_expected = once . property $
encode (X 9)
=== "{\"xval\":9,\"arr\":[\"Hilbert\",\"Dirac\"],\"quux\":1,\"bar\":\"baaz\"}"
prop_WithConstantFields_extra_fields_decode_as_expected :: Property
prop_WithConstantFields_extra_fields_decode_as_expected = once . property $
tripping (X 9) encode decode
prop_WithConstantFields_extra_fields_required_when_decoding :: Property
prop_WithConstantFields_extra_fields_required_when_decoding = once . property $
decode @X "{\"xval\":9}" === Nothing
data X2 = X2 {xval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
WithConstantFieldsOut
'["bar" := "baaz", "quux" := "axion"]
(GenericEncoded '[] X2)
prop_WithConstantFieldsOut_encodes_as_expected :: Property
prop_WithConstantFieldsOut_encodes_as_expected = once . property $
encode (X2 9)
=== "{\"xval\":9,\"quux\":\"axion\",\"bar\":\"baaz\"}"
prop_WithConstantFieldsOut_extra_fields_not_required_when_decoding :: Property
prop_WithConstantFieldsOut_extra_fields_not_required_when_decoding = once . property $
decode @X2 "{\"xval\":9}" === Just (X2 9)
data X3 = X3 {xval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
WithConstantFieldsIn
'["bar" := "baaz", "quux" := "axion"]
(GenericEncoded '[] X3)
prop_WithConstantFieldsIn_encodes_as_expected :: Property
prop_WithConstantFieldsIn_encodes_as_expected = once . property $
encode (X3 13)
=== "{\"xval\":13}"
prop_WithConstantFieldsIn_decodes_as_expected :: Property
prop_WithConstantFieldsIn_decodes_as_expected = once . property $
decode @X3 "{\"xval\":9,\"quux\":\"axion\",\"bar\":\"baaz\"}" === Just (X3 9)
prop_WithConstantFieldsIn_extra_fields_required_when_decoding :: Property
prop_WithConstantFieldsIn_extra_fields_required_when_decoding = once . property $
decode @X3 "{\"xval\":9}" === Nothing
data Y = Y {yval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
SingleFieldObject "boop" (GenericEncoded '[] Y)
prop_single_field_objects_encode_as_expected :: Property
prop_single_field_objects_encode_as_expected = once . property $
encode (Y 7)
=== "{\"boop\":{\"yval\":7}}"
prop_single_field_objects_decode_as_expected :: Property
prop_single_field_objects_decode_as_expected = once . property $
tripping (Y 7) encode decode
data Z = Z {zval :: String}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
RemapTextField "zval" "bad" "good" (GenericEncoded '[] Z)
prop_remapped_text_fields_encode_as_expected :: Property
prop_remapped_text_fields_encode_as_expected = once . property $ do
encode (Z "bad") === "{\"zval\":\"good\"}"
encode (Z "cat") === "{\"zval\":\"cat\"}"
prop_remapped_text_fields_decode_as_expected :: Property
prop_remapped_text_fields_decode_as_expected = once . property $ do
tripping (Z "bad") encode decode
tripping (Z "cat") encode decode
Just (Z "bad") === decode "{\"zval\":\"good\"}"
Just (Z "cat") === decode "{\"zval\":\"cat\"}"
data Reserved = Reserved
{ type_ :: String
, xyzmodule :: Int
, control :: Char
}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
Reserved &
GenericEncoded
'[FieldLabelModifier := [DropPrefix "xyz", DropSuffix "_"]]
prop_drop_prefix_suffix_fields_encode_as_expected :: Property
prop_drop_prefix_suffix_fields_encode_as_expected = once . property $ do
encode (Reserved "Sen" 9 'x') ===
"{\"control\":\"x\",\"module\":9,\"type\":\"Sen\"}"
prop_drop_prefix_suffix_fields_decode_as_expected :: Property
prop_drop_prefix_suffix_fields_decode_as_expected = once . property $ do
tripping (Reserved "Sen" 9 'x') encode decode
Just (Reserved "Sen" 9 'x') ===
decode "{\"control\":\"x\",\"module\":9,\"type\":\"Sen\"}"
newtype DashSeparatedWords = DashSeparatedWords Text
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via TextWithPattern "^([A-Za-z]+-)*[A-Za-z]+$"
prop_accepts_matches :: Property
prop_accepts_matches = once . property $ do
tripping (DashSeparatedWords "foo-bar-baz") encode decode
Just (DashSeparatedWords "foo-bar-baz") === decode "\"foo-bar-baz\""
prop_rejects_non_matches :: Property
prop_rejects_non_matches = once . property $ do
Left "Error in $: must match regex ^([A-Za-z]+-)*[A-Za-z]+$" === eitherDecode @DashSeparatedWords "\"foo.42\""
data Heavy = BlackHole | NeutronStar
deriving stock (Generic, Show, Eq, Ord, Bounded, Enum)
deriving (FromJSON, ToJSON) via GenericEncoded '[ConstructorTagModifier := FirstChar Lowercase] Heavy
prop_first_char_modifier_encodes_as_expected :: Property
prop_first_char_modifier_encodes_as_expected = once . property $ do
encode BlackHole === "\"blackHole\""
encode NeutronStar === "\"neutronStar\""
prop_first_char_modifier_decodes_as_expected :: Property
prop_first_char_modifier_decodes_as_expected = once . property $ do
for_ [BlackHole ..] $ \x ->
tripping x encode decode
data Unity = Unity
deriving (Show, Eq, Generic)
deriving (FromJSON, ToJSON) via EmptyObject Unity
prop_empty_object_encodes_as_expected :: Property
prop_empty_object_encodes_as_expected = once . property $ do
encode Unity === "{}"
prop_empty_object_decodes_as_expected :: Property
prop_empty_object_decodes_as_expected = once . property $ tripping Unity encode decode
-- An example of how to require a particular constant object
data Requirements = Requirements
deriving (Show, Eq, Generic)
deriving (FromJSON, ToJSON) via
WithConstantFields
'[ "api_version" := "2.0"
, "check_performed" := 'True
]
(EmptyObject Requirements)
prop_constant_object_encodes_as_expected :: Property
prop_constant_object_encodes_as_expected = once . property $
encode Requirements === "{\"api_version\":\"2.0\",\"check_performed\":true}"
prop_constant_object_decodes_as_expected :: Property
prop_constant_object_decodes_as_expected = once . property $ tripping Requirements encode decode
prop_reject_constant_object_with_incorrect_details :: Property
prop_reject_constant_object_with_incorrect_details = once . property $
eitherDecode @Requirements "{\"api_version\":\"2.0\",\"check_performed\":false}"
=== Left "Error in $: Expected constant value \"true\" but got: \"false\""
| null | https://raw.githubusercontent.com/fieldstrength/aeson-deriving/861dc8ebc05792a0924e8fe6aae2ae41a59fc751/test/Main.hs | haskell | # Language DerivingVia #
# Language DataKinds #
# Language TemplateHaskell #
# Language DeriveAnyClass #
# Language DuplicateRecordFields #
An example of how to require a particular constant object |
module Main where
import Data.Aeson
import Data.Aeson.Deriving
import Data.Foldable (for_)
import Data.Aeson.Deriving.Text.Unsafe
import Data.Text
import GHC.Generics
import Hedgehog
import Hedgehog.Main (defaultMain)
main :: IO ()
main = defaultMain [checkParallel $$(discover)]
type IdiomaticEncoded =
GenericEncoded '[FieldLabelModifier := '[SnakeCase, DropLowercasePrefix]]
data Dog = Dog
{ dogAgeInDogYears :: Int
, dogName :: String
}
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via IdiomaticEncoded Dog
once :: Property -> Property
once = withTests 1
prop_fido_encodes_as_expected :: Property
prop_fido_encodes_as_expected = once . property $
encode (Dog 9 "fido") === "{\"name\":\"fido\",\"age_in_dog_years\":9}"
prop_fido_decodes_as_expected :: Property
prop_fido_decodes_as_expected = once . property $
tripping (Dog 9 "fido") encode eitherDecode
type UppercaseTypeTagEncoded =
GenericEncoded
'[ FieldLabelModifier := '[SnakeCase, DropLowercasePrefix]
, SumEncoding := TaggedObject "type" "contents"
, TagSingleConstructors := 'True
, ConstructorTagModifier := '[Uppercase, SnakeCase]
]
data PostArticle = PostArticle
{ articleName :: String
, articleText :: String
}
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via UppercaseTypeTagEncoded PostArticle
data DeleteArticle = DeleteArticle
{ articleId :: Int
}
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via UppercaseTypeTagEncoded DeleteArticle
data ArticleCommand
= MkPostArticle PostArticle
| MkDeleteArticle DeleteArticle
deriving stock (Generic, Show, Eq)
deriving (ToJSON, FromJSON) via RecordSumEncoded "type" '[Uppercase, SnakeCase] ArticleCommand
prop_record_sum_encodes_as_expected :: Property
prop_record_sum_encodes_as_expected = once . property $
encode (MkDeleteArticle $ DeleteArticle 9)
=== "{\"id\":9,\"type\":\"DELETE_ARTICLE\"}"
prop_record_sum_decodes_as_expected :: Property
prop_record_sum_decodes_as_expected = once . property $
tripping (MkDeleteArticle $ DeleteArticle 9) encode decode
data MyVal
instance KnownJSON MyVal where jsonVal _ = Number 1
data X = X {xval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
X
& GenericEncoded '[]
& WithConstantFields
'["bar" := "baaz", "quux" := MyVal, "arr" := ["Hilbert","Dirac"]]
prop_WithConstantFields_extra_fields_encode_as_expected :: Property
prop_WithConstantFields_extra_fields_encode_as_expected = once . property $
encode (X 9)
=== "{\"xval\":9,\"arr\":[\"Hilbert\",\"Dirac\"],\"quux\":1,\"bar\":\"baaz\"}"
prop_WithConstantFields_extra_fields_decode_as_expected :: Property
prop_WithConstantFields_extra_fields_decode_as_expected = once . property $
tripping (X 9) encode decode
prop_WithConstantFields_extra_fields_required_when_decoding :: Property
prop_WithConstantFields_extra_fields_required_when_decoding = once . property $
decode @X "{\"xval\":9}" === Nothing
data X2 = X2 {xval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
WithConstantFieldsOut
'["bar" := "baaz", "quux" := "axion"]
(GenericEncoded '[] X2)
prop_WithConstantFieldsOut_encodes_as_expected :: Property
prop_WithConstantFieldsOut_encodes_as_expected = once . property $
encode (X2 9)
=== "{\"xval\":9,\"quux\":\"axion\",\"bar\":\"baaz\"}"
prop_WithConstantFieldsOut_extra_fields_not_required_when_decoding :: Property
prop_WithConstantFieldsOut_extra_fields_not_required_when_decoding = once . property $
decode @X2 "{\"xval\":9}" === Just (X2 9)
data X3 = X3 {xval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
WithConstantFieldsIn
'["bar" := "baaz", "quux" := "axion"]
(GenericEncoded '[] X3)
prop_WithConstantFieldsIn_encodes_as_expected :: Property
prop_WithConstantFieldsIn_encodes_as_expected = once . property $
encode (X3 13)
=== "{\"xval\":13}"
prop_WithConstantFieldsIn_decodes_as_expected :: Property
prop_WithConstantFieldsIn_decodes_as_expected = once . property $
decode @X3 "{\"xval\":9,\"quux\":\"axion\",\"bar\":\"baaz\"}" === Just (X3 9)
prop_WithConstantFieldsIn_extra_fields_required_when_decoding :: Property
prop_WithConstantFieldsIn_extra_fields_required_when_decoding = once . property $
decode @X3 "{\"xval\":9}" === Nothing
data Y = Y {yval :: Int}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
SingleFieldObject "boop" (GenericEncoded '[] Y)
prop_single_field_objects_encode_as_expected :: Property
prop_single_field_objects_encode_as_expected = once . property $
encode (Y 7)
=== "{\"boop\":{\"yval\":7}}"
prop_single_field_objects_decode_as_expected :: Property
prop_single_field_objects_decode_as_expected = once . property $
tripping (Y 7) encode decode
data Z = Z {zval :: String}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
RemapTextField "zval" "bad" "good" (GenericEncoded '[] Z)
prop_remapped_text_fields_encode_as_expected :: Property
prop_remapped_text_fields_encode_as_expected = once . property $ do
encode (Z "bad") === "{\"zval\":\"good\"}"
encode (Z "cat") === "{\"zval\":\"cat\"}"
prop_remapped_text_fields_decode_as_expected :: Property
prop_remapped_text_fields_decode_as_expected = once . property $ do
tripping (Z "bad") encode decode
tripping (Z "cat") encode decode
Just (Z "bad") === decode "{\"zval\":\"good\"}"
Just (Z "cat") === decode "{\"zval\":\"cat\"}"
data Reserved = Reserved
{ type_ :: String
, xyzmodule :: Int
, control :: Char
}
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via
Reserved &
GenericEncoded
'[FieldLabelModifier := [DropPrefix "xyz", DropSuffix "_"]]
prop_drop_prefix_suffix_fields_encode_as_expected :: Property
prop_drop_prefix_suffix_fields_encode_as_expected = once . property $ do
encode (Reserved "Sen" 9 'x') ===
"{\"control\":\"x\",\"module\":9,\"type\":\"Sen\"}"
prop_drop_prefix_suffix_fields_decode_as_expected :: Property
prop_drop_prefix_suffix_fields_decode_as_expected = once . property $ do
tripping (Reserved "Sen" 9 'x') encode decode
Just (Reserved "Sen" 9 'x') ===
decode "{\"control\":\"x\",\"module\":9,\"type\":\"Sen\"}"
newtype DashSeparatedWords = DashSeparatedWords Text
deriving stock (Generic, Show, Eq)
deriving (FromJSON, ToJSON) via TextWithPattern "^([A-Za-z]+-)*[A-Za-z]+$"
prop_accepts_matches :: Property
prop_accepts_matches = once . property $ do
tripping (DashSeparatedWords "foo-bar-baz") encode decode
Just (DashSeparatedWords "foo-bar-baz") === decode "\"foo-bar-baz\""
prop_rejects_non_matches :: Property
prop_rejects_non_matches = once . property $ do
Left "Error in $: must match regex ^([A-Za-z]+-)*[A-Za-z]+$" === eitherDecode @DashSeparatedWords "\"foo.42\""
data Heavy = BlackHole | NeutronStar
deriving stock (Generic, Show, Eq, Ord, Bounded, Enum)
deriving (FromJSON, ToJSON) via GenericEncoded '[ConstructorTagModifier := FirstChar Lowercase] Heavy
prop_first_char_modifier_encodes_as_expected :: Property
prop_first_char_modifier_encodes_as_expected = once . property $ do
encode BlackHole === "\"blackHole\""
encode NeutronStar === "\"neutronStar\""
prop_first_char_modifier_decodes_as_expected :: Property
prop_first_char_modifier_decodes_as_expected = once . property $ do
for_ [BlackHole ..] $ \x ->
tripping x encode decode
data Unity = Unity
deriving (Show, Eq, Generic)
deriving (FromJSON, ToJSON) via EmptyObject Unity
prop_empty_object_encodes_as_expected :: Property
prop_empty_object_encodes_as_expected = once . property $ do
encode Unity === "{}"
prop_empty_object_decodes_as_expected :: Property
prop_empty_object_decodes_as_expected = once . property $ tripping Unity encode decode
data Requirements = Requirements
deriving (Show, Eq, Generic)
deriving (FromJSON, ToJSON) via
WithConstantFields
'[ "api_version" := "2.0"
, "check_performed" := 'True
]
(EmptyObject Requirements)
prop_constant_object_encodes_as_expected :: Property
prop_constant_object_encodes_as_expected = once . property $
encode Requirements === "{\"api_version\":\"2.0\",\"check_performed\":true}"
prop_constant_object_decodes_as_expected :: Property
prop_constant_object_decodes_as_expected = once . property $ tripping Requirements encode decode
prop_reject_constant_object_with_incorrect_details :: Property
prop_reject_constant_object_with_incorrect_details = once . property $
eitherDecode @Requirements "{\"api_version\":\"2.0\",\"check_performed\":false}"
=== Left "Error in $: Expected constant value \"true\" but got: \"false\""
|
a27af0c943291fc5c655993cfdbed7239cc286dd86b3963b2ffa333197fa81d8 | tezos/tezos-mirror | durable_operations_generator.ml | (*****************************************************************************)
(* *)
(* Open Source License *)
Copyright ( c ) 2023 TriliTech < >
(* *)
(* Permission is hereby granted, free of charge, to any person obtaining a *)
(* copy of this software and associated documentation files (the "Software"),*)
to deal in the Software without restriction , including without limitation
(* the rights to use, copy, modify, merge, publish, distribute, sublicense, *)
and/or sell copies of the Software , and to permit persons to whom the
(* Software is furnished to do so, subject to the following conditions: *)
(* *)
(* The above copyright notice and this permission notice shall be included *)
(* in all copies or substantial portions of the Software. *)
(* *)
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
(* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *)
(* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL *)
(* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER*)
LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING
(* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER *)
(* DEALINGS IN THE SOFTWARE. *)
(* *)
(*****************************************************************************)
open QCheck2
type key = string list
(* Weighted operations list:
operations will be generated proportionally to their weight *)
type operations_distribution = (int * Durable_operation.some_op) list
type testcase = {
inital_state : (key * string) list;
operations : Durable_operation.some_input list;
}
module Operations_generator = struct
open Gen
let range i j =
WithExceptions.List.init ~loc:__LOC__ (1 + j - i) (fun n -> i + n)
let range_chars (i : char) (j : char) =
List.map Char.chr @@ range (Char.code i) (Char.code j)
let max_path_segments = 10
let gen_path_char =
Gen.oneof
[
Gen.oneofl ['.'; '-'; '_'];
Gen.map Char.chr (Gen.int_range (Char.code 'a') (Char.code 'z'));
Gen.map Char.chr (Gen.int_range (Char.code 'A') (Char.code 'Z'));
Gen.map Char.chr (Gen.int_range (Char.code '0') (Char.code '9'));
]
let gen_arbitrary_path =
let* segments = Gen.int_range 1 max_path_segments in
Gen.list_repeat segments
@@ Gen.string_size ~gen:gen_path_char (Gen.int_range 1 10)
let generate_initial_keys (initial_tree_size : int) :
(key * string) list Gen.t =
let+ keys = Gen.list_size (Gen.return initial_tree_size) gen_arbitrary_path
and+ values =
Gen.list_size
(Gen.return initial_tree_size)
(Gen.string_size ~gen:Gen.char (Gen.int_bound 2048))
in
let kv = WithExceptions.List.combine ~loc:__LOC__ keys values in
20 % of all keys go to readonly subpath
let ro_ops = Int.(div (List.length kv) 5) in
List.mapi
(fun i (k, v) -> if i < ro_ops then ("readonly" :: k, v) else (k, v))
kv
(* TODO: this will be implemented properly in next MR *)
let gen_op _trie _ops_distribution =
let+ key = gen_arbitrary_path in
Durable_operation.Some_input (Find_value, key)
let rec gen_ops ops_distribution (trie : int Trie.t) n ops =
let open Durable_operation in
if n <= 0 then Gen.return @@ List.rev ops
else
let* some_inp = gen_op trie ops_distribution in
let new_trie =
Option.value ~default:trie
@@
match some_inp with
| Some_input (Set_value_exn, (edit_readonly, key, value)) ->
Trie.set_value ~edit_readonly key (String.length value) trie
| Some_input (Copy_tree_exn, (edit_readonly, from_key, to_key)) ->
Trie.copy_tree ~edit_readonly ~from_key ~to_key trie
| Some_input (Move_tree_exn, (from_key, to_key)) ->
Trie.move_tree ~from_key ~to_key trie
| Some_input (Delete, (edit_readonly, key)) ->
Trie.delete ~edit_readonly key trie
| Some_input (Write_value_exn, (edit_readonly, key, offset, value)) ->
let new_value =
Int.max
(Option.value ~default:0 @@ Trie.get_value key trie)
(Int64.to_int offset + String.length value)
in
Trie.set_value ~edit_readonly key new_value trie
| _ -> None
in
(gen_ops [@tailcall]) ops_distribution new_trie (n - 1) (some_inp :: ops)
let gen_testcase ~(initial_size : int) ~(operations_number : int)
(distirbution : operations_distribution) =
let* init_kvs = generate_initial_keys initial_size in
let initial_trie =
List.fold_left
(fun trie (k, v) ->
WithExceptions.Option.get ~loc:__LOC__
@@ Trie.set_value ~edit_readonly:true k (String.length v) trie)
Trie.empty
init_kvs
in
let+ ops = gen_ops distirbution initial_trie operations_number [] in
{inital_state = init_kvs; operations = ops}
end
module Make_durable_operations_runner
(Durable : Durable_snapshot_util.Testable_durable_sig) =
struct
open Lwt_syntax
open Tezos_scoru_wasm_helpers.Encodings_util
open Tezos_scoru_wasm_helpers.Wasm_utils
let durable_exn_handler (act : unit -> 'a Lwt.t)
(cont : ('a, exn) result -> 'b Lwt.t) =
Lwt.try_bind
act
(fun res -> cont @@ Ok res)
(fun e ->
Tezos_scoru_wasm_durable_snapshot.Durable.(
match e with
| Invalid_key _ | Index_too_large _ | Value_not_found | Tree_not_found
| Out_of_bounds _ | Durable_empty | Readonly_value | IO_too_large
| Tezos_lazy_containers.Chunked_byte_vector.Bounds
(* TODO: /-/issues/4958 *)
| Tezos_tree_encoding.Key_not_found _ ->
cont @@ Error e
(* If it's another kind of exn:
something went wrong, re-throw it*)
| _ -> raise e))
(* Stress test doesn't care about exceptions
thrown out of functions.
It's implied that underlying Durable has already checked them.
*)
type op_res = Tree : Durable.t -> op_res | Value : 'a -> op_res
let tree_res (act : Durable.t Lwt.t) = Lwt.map (fun x -> Tree x) act
let value_res (act : 'a Lwt.t) = Lwt.map (fun x -> Value x) act
let supress_durable_exn dur (act : unit -> op_res Lwt.t) =
durable_exn_handler act (fun x ->
match x with
| Ok (Tree t) -> Lwt.return t
| Ok (Value _value) -> Lwt.return dur
| _ -> Lwt.return dur)
(* Create new tree with passed list of key values *)
let initialize_tree (kvs : (key * string) list) =
let open Lwt_syntax in
let open Tezos_scoru_wasm_durable_snapshot in
let ro, wo =
List.partition
(fun (k, _) -> Option.equal String.equal (List.hd k) (Some "readonly"))
kvs
in
let ro = List.map (fun (k, v) -> (Durable_operation.key_to_str k, v)) ro in
let wo = List.map (fun (k, v) -> (Durable_operation.key_to_str k, v)) wo in
(* Create Durable_storage out of WO keys.
Basically taking advantage of Current durable encoding
*)
let* init_wo = Lwt.map Durable.of_storage_exn @@ make_durable wo in
(* Add RO keys in the tree *)
let* init_tezos_durable =
Lwt_list.fold_left_s
(fun dur (k, v) ->
Durable.set_value_exn
~edit_readonly:true
dur
(Durable.key_of_string_exn k)
v)
init_wo
ro
in
Encode tree to the irmin one
let* init_tree = empty_tree () in
Tree_encoding_runner.encode
Tezos_scoru_wasm_durable_snapshot.Durable.encoding
init_tezos_durable
init_tree
let run_testcase {inital_state; operations} =
let open Durable_operation in
let* init_tree = initialize_tree inital_state in
(* Decode initial paired durable *)
let* init_durable =
Tree_encoding_runner.decode Durable.encoding init_tree
in
Lwt_list.fold_left_s
(fun dur op ->
supress_durable_exn dur @@ fun () ->
match op with
| Some_input (Find_value, key) ->
value_res
@@ Durable.find_value
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Find_value_exn, key) ->
value_res
@@ Durable.find_value_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Set_value_exn, (edit_readonly, key, value)) ->
tree_res
@@ Durable.set_value_exn
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
value
| Some_input (Copy_tree_exn, (edit_readonly, from, to_)) ->
tree_res
@@ Durable.copy_tree_exn
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str from)
(Durable.key_of_string_exn @@ Durable_operation.key_to_str to_)
| Some_input (Move_tree_exn, (from, to_)) ->
tree_res
@@ Durable.move_tree_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str from)
(Durable.key_of_string_exn @@ Durable_operation.key_to_str to_)
| Some_input (Delete, (edit_readonly, key)) ->
tree_res
@@ Durable.delete
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (List, key) ->
value_res
@@ Durable.list
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Count_subtrees, key) ->
value_res
@@ Durable.count_subtrees
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Substree_name_at, (key, idx)) ->
value_res
@@ Durable.subtree_name_at
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
idx
| Some_input (Hash, key) ->
value_res
@@ Durable.hash
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Hash_exn, key) ->
value_res
@@ Durable.hash_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Write_value_exn, (edit_readonly, key, offset, value)) ->
tree_res
@@ Durable.write_value_exn
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
offset
value
| Some_input (Read_value_exn, (key, offset, len)) ->
value_res
@@ Durable.read_value_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
offset
len)
init_durable
operations
end
| null | https://raw.githubusercontent.com/tezos/tezos-mirror/a305d9ded27e234c7937fce7adfc1f199196283a/src/lib_scoru_wasm/test/helpers/durable_operations_generator.ml | ocaml | ***************************************************************************
Open Source License
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
the rights to use, copy, modify, merge, publish, distribute, sublicense,
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
***************************************************************************
Weighted operations list:
operations will be generated proportionally to their weight
TODO: this will be implemented properly in next MR
TODO: /-/issues/4958
If it's another kind of exn:
something went wrong, re-throw it
Stress test doesn't care about exceptions
thrown out of functions.
It's implied that underlying Durable has already checked them.
Create new tree with passed list of key values
Create Durable_storage out of WO keys.
Basically taking advantage of Current durable encoding
Add RO keys in the tree
Decode initial paired durable | Copyright ( c ) 2023 TriliTech < >
to deal in the Software without restriction , including without limitation
and/or sell copies of the Software , and to permit persons to whom the
THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE , ARISING
open QCheck2
type key = string list
type operations_distribution = (int * Durable_operation.some_op) list
type testcase = {
inital_state : (key * string) list;
operations : Durable_operation.some_input list;
}
module Operations_generator = struct
open Gen
let range i j =
WithExceptions.List.init ~loc:__LOC__ (1 + j - i) (fun n -> i + n)
let range_chars (i : char) (j : char) =
List.map Char.chr @@ range (Char.code i) (Char.code j)
let max_path_segments = 10
let gen_path_char =
Gen.oneof
[
Gen.oneofl ['.'; '-'; '_'];
Gen.map Char.chr (Gen.int_range (Char.code 'a') (Char.code 'z'));
Gen.map Char.chr (Gen.int_range (Char.code 'A') (Char.code 'Z'));
Gen.map Char.chr (Gen.int_range (Char.code '0') (Char.code '9'));
]
let gen_arbitrary_path =
let* segments = Gen.int_range 1 max_path_segments in
Gen.list_repeat segments
@@ Gen.string_size ~gen:gen_path_char (Gen.int_range 1 10)
let generate_initial_keys (initial_tree_size : int) :
(key * string) list Gen.t =
let+ keys = Gen.list_size (Gen.return initial_tree_size) gen_arbitrary_path
and+ values =
Gen.list_size
(Gen.return initial_tree_size)
(Gen.string_size ~gen:Gen.char (Gen.int_bound 2048))
in
let kv = WithExceptions.List.combine ~loc:__LOC__ keys values in
20 % of all keys go to readonly subpath
let ro_ops = Int.(div (List.length kv) 5) in
List.mapi
(fun i (k, v) -> if i < ro_ops then ("readonly" :: k, v) else (k, v))
kv
let gen_op _trie _ops_distribution =
let+ key = gen_arbitrary_path in
Durable_operation.Some_input (Find_value, key)
let rec gen_ops ops_distribution (trie : int Trie.t) n ops =
let open Durable_operation in
if n <= 0 then Gen.return @@ List.rev ops
else
let* some_inp = gen_op trie ops_distribution in
let new_trie =
Option.value ~default:trie
@@
match some_inp with
| Some_input (Set_value_exn, (edit_readonly, key, value)) ->
Trie.set_value ~edit_readonly key (String.length value) trie
| Some_input (Copy_tree_exn, (edit_readonly, from_key, to_key)) ->
Trie.copy_tree ~edit_readonly ~from_key ~to_key trie
| Some_input (Move_tree_exn, (from_key, to_key)) ->
Trie.move_tree ~from_key ~to_key trie
| Some_input (Delete, (edit_readonly, key)) ->
Trie.delete ~edit_readonly key trie
| Some_input (Write_value_exn, (edit_readonly, key, offset, value)) ->
let new_value =
Int.max
(Option.value ~default:0 @@ Trie.get_value key trie)
(Int64.to_int offset + String.length value)
in
Trie.set_value ~edit_readonly key new_value trie
| _ -> None
in
(gen_ops [@tailcall]) ops_distribution new_trie (n - 1) (some_inp :: ops)
let gen_testcase ~(initial_size : int) ~(operations_number : int)
(distirbution : operations_distribution) =
let* init_kvs = generate_initial_keys initial_size in
let initial_trie =
List.fold_left
(fun trie (k, v) ->
WithExceptions.Option.get ~loc:__LOC__
@@ Trie.set_value ~edit_readonly:true k (String.length v) trie)
Trie.empty
init_kvs
in
let+ ops = gen_ops distirbution initial_trie operations_number [] in
{inital_state = init_kvs; operations = ops}
end
module Make_durable_operations_runner
(Durable : Durable_snapshot_util.Testable_durable_sig) =
struct
open Lwt_syntax
open Tezos_scoru_wasm_helpers.Encodings_util
open Tezos_scoru_wasm_helpers.Wasm_utils
let durable_exn_handler (act : unit -> 'a Lwt.t)
(cont : ('a, exn) result -> 'b Lwt.t) =
Lwt.try_bind
act
(fun res -> cont @@ Ok res)
(fun e ->
Tezos_scoru_wasm_durable_snapshot.Durable.(
match e with
| Invalid_key _ | Index_too_large _ | Value_not_found | Tree_not_found
| Out_of_bounds _ | Durable_empty | Readonly_value | IO_too_large
| Tezos_lazy_containers.Chunked_byte_vector.Bounds
| Tezos_tree_encoding.Key_not_found _ ->
cont @@ Error e
| _ -> raise e))
type op_res = Tree : Durable.t -> op_res | Value : 'a -> op_res
let tree_res (act : Durable.t Lwt.t) = Lwt.map (fun x -> Tree x) act
let value_res (act : 'a Lwt.t) = Lwt.map (fun x -> Value x) act
let supress_durable_exn dur (act : unit -> op_res Lwt.t) =
durable_exn_handler act (fun x ->
match x with
| Ok (Tree t) -> Lwt.return t
| Ok (Value _value) -> Lwt.return dur
| _ -> Lwt.return dur)
let initialize_tree (kvs : (key * string) list) =
let open Lwt_syntax in
let open Tezos_scoru_wasm_durable_snapshot in
let ro, wo =
List.partition
(fun (k, _) -> Option.equal String.equal (List.hd k) (Some "readonly"))
kvs
in
let ro = List.map (fun (k, v) -> (Durable_operation.key_to_str k, v)) ro in
let wo = List.map (fun (k, v) -> (Durable_operation.key_to_str k, v)) wo in
let* init_wo = Lwt.map Durable.of_storage_exn @@ make_durable wo in
let* init_tezos_durable =
Lwt_list.fold_left_s
(fun dur (k, v) ->
Durable.set_value_exn
~edit_readonly:true
dur
(Durable.key_of_string_exn k)
v)
init_wo
ro
in
Encode tree to the irmin one
let* init_tree = empty_tree () in
Tree_encoding_runner.encode
Tezos_scoru_wasm_durable_snapshot.Durable.encoding
init_tezos_durable
init_tree
let run_testcase {inital_state; operations} =
let open Durable_operation in
let* init_tree = initialize_tree inital_state in
let* init_durable =
Tree_encoding_runner.decode Durable.encoding init_tree
in
Lwt_list.fold_left_s
(fun dur op ->
supress_durable_exn dur @@ fun () ->
match op with
| Some_input (Find_value, key) ->
value_res
@@ Durable.find_value
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Find_value_exn, key) ->
value_res
@@ Durable.find_value_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Set_value_exn, (edit_readonly, key, value)) ->
tree_res
@@ Durable.set_value_exn
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
value
| Some_input (Copy_tree_exn, (edit_readonly, from, to_)) ->
tree_res
@@ Durable.copy_tree_exn
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str from)
(Durable.key_of_string_exn @@ Durable_operation.key_to_str to_)
| Some_input (Move_tree_exn, (from, to_)) ->
tree_res
@@ Durable.move_tree_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str from)
(Durable.key_of_string_exn @@ Durable_operation.key_to_str to_)
| Some_input (Delete, (edit_readonly, key)) ->
tree_res
@@ Durable.delete
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (List, key) ->
value_res
@@ Durable.list
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Count_subtrees, key) ->
value_res
@@ Durable.count_subtrees
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Substree_name_at, (key, idx)) ->
value_res
@@ Durable.subtree_name_at
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
idx
| Some_input (Hash, key) ->
value_res
@@ Durable.hash
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Hash_exn, key) ->
value_res
@@ Durable.hash_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
| Some_input (Write_value_exn, (edit_readonly, key, offset, value)) ->
tree_res
@@ Durable.write_value_exn
~edit_readonly
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
offset
value
| Some_input (Read_value_exn, (key, offset, len)) ->
value_res
@@ Durable.read_value_exn
dur
(Durable.key_of_string_exn @@ Durable_operation.key_to_str key)
offset
len)
init_durable
operations
end
|
562ebc39abf8072d11a2f8f67131202dd1545204022ade99560aaa2d69d81c76 | tweag/sparkle | Main.hs | {-# LANGUAGE OverloadedStrings #-}
# LANGUAGE StaticPointers #
module Main where
import Control.Distributed.Closure
import Control.Distributed.Spark as RDD
import qualified Data.Text as Text
main :: IO ()
main = forwardUnhandledExceptionsToSpark $ do
conf <- newSparkConf "RDD operations demo"
sc <- getOrCreateSparkContext conf
rdd <- parallelize sc $ Text.words "The quick brown fox jumps over the lazy dog"
print =<< collect rdd
print =<< RDD.reduce (closure $ static (\a b -> b <> " " <> a)) rdd
print =<< collect =<< RDD.map (closure $ static Text.reverse) rdd
print =<< RDD.take 3 rdd
print =<< collect =<< RDD.distinct rdd
print =<< RDD.fold (closure $ static (||)) False
=<< RDD.map (closure $ static (=="dog")) rdd
| null | https://raw.githubusercontent.com/tweag/sparkle/7ad678c6830cfe689b3bed03e008728e59c40cfc/apps/rdd-ops/Main.hs | haskell | # LANGUAGE OverloadedStrings # | # LANGUAGE StaticPointers #
module Main where
import Control.Distributed.Closure
import Control.Distributed.Spark as RDD
import qualified Data.Text as Text
main :: IO ()
main = forwardUnhandledExceptionsToSpark $ do
conf <- newSparkConf "RDD operations demo"
sc <- getOrCreateSparkContext conf
rdd <- parallelize sc $ Text.words "The quick brown fox jumps over the lazy dog"
print =<< collect rdd
print =<< RDD.reduce (closure $ static (\a b -> b <> " " <> a)) rdd
print =<< collect =<< RDD.map (closure $ static Text.reverse) rdd
print =<< RDD.take 3 rdd
print =<< collect =<< RDD.distinct rdd
print =<< RDD.fold (closure $ static (||)) False
=<< RDD.map (closure $ static (=="dog")) rdd
|
1820a171f1c1543cff0eab752ebdf8ca690b540fe89b1b834d3fd5225b709ad9 | imandra-ai/fix-engine | bench_engine_decode.ml | open Lwt.Syntax
(* ony block of test data *)
let test_data : string =
Test_engine_data.fix_data |> String.split_on_char '\n' |> List.map String.trim
|> List.filter (fun s -> s <> "")
|> String.concat ""
(* reference implementation *)
module Ref = struct
open Lwt.Infix
Converts [ ' 5 ' ; ' 2 ' ; ' = ' ; ' A ' ] to ( " 52 " , " A " )
let extract_key_value (chlist : char list) : string * string =
let buf = Buffer.create @@ List.length chlist in
let () = chlist |> List.iter @@ Buffer.add_char buf in
Buffer.contents buf |> String.split_on_char '=' |> function
| [] -> "", ""
| h :: tl -> h, String.concat "=" tl
let rec get_key_value ~split (ic : Lwt_io.input Lwt_io.channel) chars =
Lwt_io.read_char ic >>= fun c ->
if c <> split then
get_key_value ~split ic (c :: chars)
else
Lwt.return @@ extract_key_value @@ List.rev chars
let rec get_message ~split ic msg =
get_key_value ~split ic [] >>= fun (k, v) ->
let msg = (k, v) :: msg in
if k <> "10" then
get_message ~split ic msg
else
Lwt.return @@ List.rev msg
let read_next_message ~split ic =
Lwt.catch
(fun () ->
let+ m = get_message ~split ic [] in
Some m)
(function
| End_of_file -> Lwt.return None
| e -> raise e)
end
(* channel reading [n] copies of [test_data] *)
let source n : Lwt_io.input_channel =
let len_data = String.length test_data in
let n = ref n in
let off = ref 0 in
Lwt_io.make ~mode:Lwt_io.input (fun buf i ask_len ->
try
(* see if we still have bytes to read from [test_data] *)
if !off >= len_data then
if !n = 0 then
raise End_of_file
else (
decr n;
off := 0
);
(* how many bytes to copy *)
let len = min (len_data - !off) ask_len in
assert (len > 0);
Lwt_bytes.blit_from_string test_data !off buf i len;
off := !off + len;
Lwt.return len
with End_of_file -> Lwt.return 0)
let run_ref n =
Lwt_main.run
@@
let ic = source n in
let rec loop count =
let* m = Ref.read_next_message ~split:'|' ic in
match m with
| None -> Lwt.return count
| Some _ -> loop (count + 1)
in
let* _count = loop 0 in
(*Printf.printf "read %d messages\n%!" _count ;*)
Lwt.return ()
let run_cur n =
Lwt_main.run
@@
let ic = source n in
let reader = Fix_io.Read.create ~split:'|' ic in
let rec loop count =
let* m = Fix_io.Read.read_next_message reader in
match m with
| None -> Lwt.return count
| Some _ -> loop (count + 1)
in
let* _count = loop 0 in
(*Printf.printf "read %d messages\n%!" _count ;*)
Lwt.return ()
let run n =
let n_ref = run_ref n in
let n_cur = run_cur n in
assert (n_ref = n_cur);
Benchmark.throughputN 3
[
Printf.sprintf "run(%d)" n, run_cur, n;
Printf.sprintf "run-ref(%d)" n, run_ref, n;
]
let () =
List.iter
(fun n ->
let r = run n in
Benchmark.tabulate r)
[ 100; 1_000; 5_000 ]
| null | https://raw.githubusercontent.com/imandra-ai/fix-engine/42f3c4f3ca432469969e89e461ca76b52c21f282/tests/benchs/engine/bench_engine_decode.ml | ocaml | ony block of test data
reference implementation
channel reading [n] copies of [test_data]
see if we still have bytes to read from [test_data]
how many bytes to copy
Printf.printf "read %d messages\n%!" _count ;
Printf.printf "read %d messages\n%!" _count ; | open Lwt.Syntax
let test_data : string =
Test_engine_data.fix_data |> String.split_on_char '\n' |> List.map String.trim
|> List.filter (fun s -> s <> "")
|> String.concat ""
module Ref = struct
open Lwt.Infix
Converts [ ' 5 ' ; ' 2 ' ; ' = ' ; ' A ' ] to ( " 52 " , " A " )
let extract_key_value (chlist : char list) : string * string =
let buf = Buffer.create @@ List.length chlist in
let () = chlist |> List.iter @@ Buffer.add_char buf in
Buffer.contents buf |> String.split_on_char '=' |> function
| [] -> "", ""
| h :: tl -> h, String.concat "=" tl
let rec get_key_value ~split (ic : Lwt_io.input Lwt_io.channel) chars =
Lwt_io.read_char ic >>= fun c ->
if c <> split then
get_key_value ~split ic (c :: chars)
else
Lwt.return @@ extract_key_value @@ List.rev chars
let rec get_message ~split ic msg =
get_key_value ~split ic [] >>= fun (k, v) ->
let msg = (k, v) :: msg in
if k <> "10" then
get_message ~split ic msg
else
Lwt.return @@ List.rev msg
let read_next_message ~split ic =
Lwt.catch
(fun () ->
let+ m = get_message ~split ic [] in
Some m)
(function
| End_of_file -> Lwt.return None
| e -> raise e)
end
let source n : Lwt_io.input_channel =
let len_data = String.length test_data in
let n = ref n in
let off = ref 0 in
Lwt_io.make ~mode:Lwt_io.input (fun buf i ask_len ->
try
if !off >= len_data then
if !n = 0 then
raise End_of_file
else (
decr n;
off := 0
);
let len = min (len_data - !off) ask_len in
assert (len > 0);
Lwt_bytes.blit_from_string test_data !off buf i len;
off := !off + len;
Lwt.return len
with End_of_file -> Lwt.return 0)
let run_ref n =
Lwt_main.run
@@
let ic = source n in
let rec loop count =
let* m = Ref.read_next_message ~split:'|' ic in
match m with
| None -> Lwt.return count
| Some _ -> loop (count + 1)
in
let* _count = loop 0 in
Lwt.return ()
let run_cur n =
Lwt_main.run
@@
let ic = source n in
let reader = Fix_io.Read.create ~split:'|' ic in
let rec loop count =
let* m = Fix_io.Read.read_next_message reader in
match m with
| None -> Lwt.return count
| Some _ -> loop (count + 1)
in
let* _count = loop 0 in
Lwt.return ()
let run n =
let n_ref = run_ref n in
let n_cur = run_cur n in
assert (n_ref = n_cur);
Benchmark.throughputN 3
[
Printf.sprintf "run(%d)" n, run_cur, n;
Printf.sprintf "run-ref(%d)" n, run_ref, n;
]
let () =
List.iter
(fun n ->
let r = run n in
Benchmark.tabulate r)
[ 100; 1_000; 5_000 ]
|
eb761cad53683e78eff2f87ab3bc1fee675daecfe3a951c64211587fa12324c9 | mariari/Misc-Lisp-Scripts | draw.lisp | (defpackage :clim-yaml
(:use :clim :clim-lisp :cl-user)
(:export main-gui))
(in-package :clim-yaml)
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Types
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defclass stack-yaml-view (view)
((%yaml :initarg :stack-yaml :reader stack-yaml)))
(defclass stack-yaml-object-wraper ()
((%yaml :initarg :stack-yaml :reader stack-yaml)))
(defclass show-view (view) ())
(defun wrap-yaml (yaml)
(make-instance 'stack-yaml-object-wraper :stack-yaml yaml))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Main Application
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define-application-frame display-clim ()
((%top-stack :initform (wrap-yaml cl-user::*project*) :accessor root))
(:panes
(make-pane :application
:width 600
:height 800
:display-function #'generate-graph
:display-time t
:default-view (make-instance 'show-view))
(interactor :interactor :height 100 :width 100))
(:layouts
(default (vertically ()
(9/10 make-pane)
(1/10 interactor)))))
(defun generate-graph (frame pane)
(let ((unwraped (stack-yaml (root frame))))
(format-graph-from-roots
(list unwraped)
(lambda (object stream)
(let ((wrap (wrap-yaml object)))
(present wrap (presentation-type-of wrap) :stream stream)))
;; Filtered version of the function. Could call
;; #'cl-user::stack-yaml-packages if one wants madness of arrows!
#'filter-unique-sub-packages
:stream pane
:merge-duplicates t
:maximize-generations t
:graph-type :dag
:center-nodes t
:orientation :vertical
:generation-separation 20
:within-generation-separation 20
:arc-drawer #'draw-same-y-line*
:arc-drawing-options (list :line-thickness 1.4 :head-width 5))))
(defun views-example ()
(run-frame-top-level (make-application-frame 'display-clim)))
(defun main-gui ()
(views-example))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Filtering Children Packages
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun filter-unique-sub-packages (package)
"given a package, find all packages that entail all the dependent packages.
Thus if the given package relies on packages A and B, but package A
relies on B, then only give back A"
(let* ((seen-set (fset:convert 'fset:set (dependency-names package)))
(children-packages (cl-user::stack-yaml-packages package))
(unique-children (reduce #'recursively-remove
children-packages
:initial-value seen-set)))
(remove-if-not (lambda (package)
(fset:member? (cl-user::stack-yaml-name package) unique-children))
children-packages)))
(defun dependency-names (package)
"gets the name of all the children packages"
cl-user::(mapcar #'stack-yaml-name (stack-yaml-packages package)))
;; Could be done more generally by getting the list of children, then disjoint union
(defun recursively-remove (set package)
"takes a set of names and a package and removes all generation of
children from the current set"
(let ((reduced-set
(reduce #'fset:less (dependency-names package) :initial-value set)))
(reduce #'recursively-remove
(cl-user::stack-yaml-packages package)
:initial-value reduced-set)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Commands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define-display-clim-command (com-focus :name t) ((yaml 'stack-yaml-object-wraper))
(if yaml
(setf (root *application-frame*) yaml)
(format (frame-standard-input *application-frame*)
"Please give a stack yaml"))
(redisplay-frame-panes *application-frame* :force-p t))
(define-display-clim-command (com-reset :name t) ()
(setf (root *application-frame*) (wrap-yaml cl-user::*project*))
(redisplay-frame-panes *application-frame* :force-p t))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Presentation
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun present-stack-yaml (pane stack-yaml-wraper)
(with-output-as-presentation (pane stack-yaml-wraper 'stack-yaml-object-wraper)
(let ((unwrapped (stack-yaml stack-yaml-wraper)))
(format pane "~A"
(cl-user::stack-yaml-name unwrapped)))))
(define-presentation-method present ((object stack-yaml-object-wraper)
(type stack-yaml-object-wraper)
(stream extended-output-stream)
(view show-view)
&key)
(surrounding-output-with-border (stream :shape :rectangle :background +alice-blue+)
(present-stack-yaml stream object)))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Color Drawing
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(let ((inks (make-contrasting-inks 5)))
(defun get-contrasting-ink (i)
(elt inks (mod i 5))))
(defun draw-same-y-line* (stream from-node to-node x1 y1 x2 y2
&rest drawing-options
&key &allow-other-keys)
(declare (ignore from-node to-node))
(apply #'draw-arrow* stream x1 y1 x2 y2
(append drawing-options (list :ink (get-contrasting-ink (floor y1))))))
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; Dragging functionality
;; Taken from the Demo code!
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(defun find-graph-node (record)
"Searches upward until a graph node parent of the supplied output record is found."
(loop for current = record then (output-record-parent current)
while current
when (graph-node-output-record-p current)
do (return current)))
(defun node-edges (node)
(append (alexandria:hash-table-values (slot-value node 'climi::edges-from))
(alexandria:hash-table-values (slot-value node 'climi::edges-to))))
(defun node-and-edges-region (node edges)
(reduce #'region-union edges :key #'copy-rectangle
:initial-value (copy-rectangle node)))
(defun redisplay-edges (graph edges)
(dolist (edge edges)
(climi::layout-edge-1 graph (climi::from-node edge) (climi::to-node edge))))
( AH ) the day :
;;;
( I have n't looked in detail at the spec or to confirm my
assumptions here , but as I understand things .. ) CLIM regions are
;;; immutable. Output records ARE mutable. A McCLIM output record can
;;; be used as a rectangular region corresponding to its bounding
;;; rectangle. But this bounding rectangle is not immutable! So,
;;; region operations such as region-union may build a rectangle-set
;;; capturing the mutable output-record object, violating the
;;; immutability of regions and causing widespread panic and
;;; confusion.
(defun copy-rectangle (region)
(with-bounding-rectangle* (x0 y0 x1 y1) region
;; We use this rectangle to clear an area on the sheet which only
;; makes sense for integer coordinates.
(make-rectangle* (floor x0) (floor y0) (ceiling x1) (ceiling y1))))
(define-display-clim-command (com-drag-node)
((record t) (offset-x real :default 0) (offset-y real :default 0))
(let* ((stream *standard-output*)
(node-record (find-graph-node record))
(edge-records (node-edges node-record))
(graph-record (output-record-parent node-record))
(erase-region))
(assert (typep graph-record 'graph-output-record))
(drag-output-record
stream node-record
:feedback (lambda (record stream old-x old-y x y mode)
(declare (ignore old-x old-y))
(ecase mode
(:erase
;; Capture current regions before modifying the
;; output records.
(setf erase-region
(node-and-edges-region record edge-records))
;; Remove contents (i.e. lines) of edge output
;; records. This does not repaint anything. To
;; account for that, we include ERASE-REGION in
;; the :DRAW clause.
(map nil #'clear-output-record edge-records))
(:draw
;; Reposition the node record (this does not
;; automatically replay the record).
(setf (output-record-position record)
(values (- x offset-x) (- y offset-y)))
;; Regenerate child records of the edge records
;; for the changed node position (without drawing
;; since we will draw everything at once as a
;; final step).
(with-output-recording-options (stream :record t :draw nil)
(redisplay-edges graph-record edge-records))
;; Repaint all affected areas. This also replays
;; the modified node and edge output records.
(repaint-sheet
stream (region-union (or erase-region +nowhere+)
(node-and-edges-region
record edge-records))))))
:finish-on-release t :multiple-window nil)))
(define-presentation-to-command-translator record-dragging-translator
(t com-drag-node display-clim
:tester ((object presentation)
(find-graph-node presentation)))
(object presentation x y)
(multiple-value-bind (old-x old-y) (output-record-position presentation)
(list presentation (- x old-x) (- y old-y))))
| null | https://raw.githubusercontent.com/mariari/Misc-Lisp-Scripts/8ab235865227bae0f249b3938b819f6c6420f92a/projects/haskell-yaml-generator/gui/draw.lisp | lisp |
Types
Main Application
Filtered version of the function. Could call
#'cl-user::stack-yaml-packages if one wants madness of arrows!
Filtering Children Packages
Could be done more generally by getting the list of children, then disjoint union
Commands
Presentation
Color Drawing
Dragging functionality
Taken from the Demo code!
immutable. Output records ARE mutable. A McCLIM output record can
be used as a rectangular region corresponding to its bounding
rectangle. But this bounding rectangle is not immutable! So,
region operations such as region-union may build a rectangle-set
capturing the mutable output-record object, violating the
immutability of regions and causing widespread panic and
confusion.
We use this rectangle to clear an area on the sheet which only
makes sense for integer coordinates.
Capture current regions before modifying the
output records.
Remove contents (i.e. lines) of edge output
records. This does not repaint anything. To
account for that, we include ERASE-REGION in
the :DRAW clause.
Reposition the node record (this does not
automatically replay the record).
Regenerate child records of the edge records
for the changed node position (without drawing
since we will draw everything at once as a
final step).
Repaint all affected areas. This also replays
the modified node and edge output records. | (defpackage :clim-yaml
(:use :clim :clim-lisp :cl-user)
(:export main-gui))
(in-package :clim-yaml)
(defclass stack-yaml-view (view)
((%yaml :initarg :stack-yaml :reader stack-yaml)))
(defclass stack-yaml-object-wraper ()
((%yaml :initarg :stack-yaml :reader stack-yaml)))
(defclass show-view (view) ())
(defun wrap-yaml (yaml)
(make-instance 'stack-yaml-object-wraper :stack-yaml yaml))
(define-application-frame display-clim ()
((%top-stack :initform (wrap-yaml cl-user::*project*) :accessor root))
(:panes
(make-pane :application
:width 600
:height 800
:display-function #'generate-graph
:display-time t
:default-view (make-instance 'show-view))
(interactor :interactor :height 100 :width 100))
(:layouts
(default (vertically ()
(9/10 make-pane)
(1/10 interactor)))))
(defun generate-graph (frame pane)
(let ((unwraped (stack-yaml (root frame))))
(format-graph-from-roots
(list unwraped)
(lambda (object stream)
(let ((wrap (wrap-yaml object)))
(present wrap (presentation-type-of wrap) :stream stream)))
#'filter-unique-sub-packages
:stream pane
:merge-duplicates t
:maximize-generations t
:graph-type :dag
:center-nodes t
:orientation :vertical
:generation-separation 20
:within-generation-separation 20
:arc-drawer #'draw-same-y-line*
:arc-drawing-options (list :line-thickness 1.4 :head-width 5))))
(defun views-example ()
(run-frame-top-level (make-application-frame 'display-clim)))
(defun main-gui ()
(views-example))
(defun filter-unique-sub-packages (package)
"given a package, find all packages that entail all the dependent packages.
Thus if the given package relies on packages A and B, but package A
relies on B, then only give back A"
(let* ((seen-set (fset:convert 'fset:set (dependency-names package)))
(children-packages (cl-user::stack-yaml-packages package))
(unique-children (reduce #'recursively-remove
children-packages
:initial-value seen-set)))
(remove-if-not (lambda (package)
(fset:member? (cl-user::stack-yaml-name package) unique-children))
children-packages)))
(defun dependency-names (package)
"gets the name of all the children packages"
cl-user::(mapcar #'stack-yaml-name (stack-yaml-packages package)))
(defun recursively-remove (set package)
"takes a set of names and a package and removes all generation of
children from the current set"
(let ((reduced-set
(reduce #'fset:less (dependency-names package) :initial-value set)))
(reduce #'recursively-remove
(cl-user::stack-yaml-packages package)
:initial-value reduced-set)))
(define-display-clim-command (com-focus :name t) ((yaml 'stack-yaml-object-wraper))
(if yaml
(setf (root *application-frame*) yaml)
(format (frame-standard-input *application-frame*)
"Please give a stack yaml"))
(redisplay-frame-panes *application-frame* :force-p t))
(define-display-clim-command (com-reset :name t) ()
(setf (root *application-frame*) (wrap-yaml cl-user::*project*))
(redisplay-frame-panes *application-frame* :force-p t))
(defun present-stack-yaml (pane stack-yaml-wraper)
(with-output-as-presentation (pane stack-yaml-wraper 'stack-yaml-object-wraper)
(let ((unwrapped (stack-yaml stack-yaml-wraper)))
(format pane "~A"
(cl-user::stack-yaml-name unwrapped)))))
(define-presentation-method present ((object stack-yaml-object-wraper)
(type stack-yaml-object-wraper)
(stream extended-output-stream)
(view show-view)
&key)
(surrounding-output-with-border (stream :shape :rectangle :background +alice-blue+)
(present-stack-yaml stream object)))
(let ((inks (make-contrasting-inks 5)))
(defun get-contrasting-ink (i)
(elt inks (mod i 5))))
(defun draw-same-y-line* (stream from-node to-node x1 y1 x2 y2
&rest drawing-options
&key &allow-other-keys)
(declare (ignore from-node to-node))
(apply #'draw-arrow* stream x1 y1 x2 y2
(append drawing-options (list :ink (get-contrasting-ink (floor y1))))))
(defun find-graph-node (record)
"Searches upward until a graph node parent of the supplied output record is found."
(loop for current = record then (output-record-parent current)
while current
when (graph-node-output-record-p current)
do (return current)))
(defun node-edges (node)
(append (alexandria:hash-table-values (slot-value node 'climi::edges-from))
(alexandria:hash-table-values (slot-value node 'climi::edges-to))))
(defun node-and-edges-region (node edges)
(reduce #'region-union edges :key #'copy-rectangle
:initial-value (copy-rectangle node)))
(defun redisplay-edges (graph edges)
(dolist (edge edges)
(climi::layout-edge-1 graph (climi::from-node edge) (climi::to-node edge))))
( AH ) the day :
( I have n't looked in detail at the spec or to confirm my
assumptions here , but as I understand things .. ) CLIM regions are
(defun copy-rectangle (region)
(with-bounding-rectangle* (x0 y0 x1 y1) region
(make-rectangle* (floor x0) (floor y0) (ceiling x1) (ceiling y1))))
(define-display-clim-command (com-drag-node)
((record t) (offset-x real :default 0) (offset-y real :default 0))
(let* ((stream *standard-output*)
(node-record (find-graph-node record))
(edge-records (node-edges node-record))
(graph-record (output-record-parent node-record))
(erase-region))
(assert (typep graph-record 'graph-output-record))
(drag-output-record
stream node-record
:feedback (lambda (record stream old-x old-y x y mode)
(declare (ignore old-x old-y))
(ecase mode
(:erase
(setf erase-region
(node-and-edges-region record edge-records))
(map nil #'clear-output-record edge-records))
(:draw
(setf (output-record-position record)
(values (- x offset-x) (- y offset-y)))
(with-output-recording-options (stream :record t :draw nil)
(redisplay-edges graph-record edge-records))
(repaint-sheet
stream (region-union (or erase-region +nowhere+)
(node-and-edges-region
record edge-records))))))
:finish-on-release t :multiple-window nil)))
(define-presentation-to-command-translator record-dragging-translator
(t com-drag-node display-clim
:tester ((object presentation)
(find-graph-node presentation)))
(object presentation x y)
(multiple-value-bind (old-x old-y) (output-record-position presentation)
(list presentation (- x old-x) (- y old-y))))
|
cbc452613e5c70a4617959a2dcde84407e023ad2e0181120d631c7cca326ab6c | alsonkemp/turbinado-website | Home.hs | module App.Controllers.Home where
import Turbinado.Controller
index :: Controller ()
index = return ()
performance :: Controller ()
performance = return ()
install :: Controller ()
install = return ()
architecture :: Controller ()
architecture= return ()
hello :: Controller ()
hello = clearLayout
| null | https://raw.githubusercontent.com/alsonkemp/turbinado-website/310f38dd7d13cdb838e2ea6181c61521021c32b5/App/Controllers/Home.hs | haskell | module App.Controllers.Home where
import Turbinado.Controller
index :: Controller ()
index = return ()
performance :: Controller ()
performance = return ()
install :: Controller ()
install = return ()
architecture :: Controller ()
architecture= return ()
hello :: Controller ()
hello = clearLayout
|
|
e18162b6799b40ec2b9d9823a4ac7aeb4166a620cbb273e80fe049c7a2788f7e | composewell/streamly-coreutils | Dirname.hs | -- |
Module : Streamly . Coreutils . Dirname
Copyright : ( c ) 2022 Composewell Technologies
-- License : BSD-3-Clause
-- Maintainer :
-- Stability : experimental
Portability : GHC
--
Strip the last component from file name .
module Streamly.Coreutils.Dirname
(dirname)
where
import System.FilePath (takeDirectory)
dirname :: FilePath -> FilePath
dirname = takeDirectory
| null | https://raw.githubusercontent.com/composewell/streamly-coreutils/0a80210f2bbe63c5400682d23e6b1d5a18c11c3d/src/Streamly/Coreutils/Dirname.hs | haskell | |
License : BSD-3-Clause
Maintainer :
Stability : experimental
| Module : Streamly . Coreutils . Dirname
Copyright : ( c ) 2022 Composewell Technologies
Portability : GHC
Strip the last component from file name .
module Streamly.Coreutils.Dirname
(dirname)
where
import System.FilePath (takeDirectory)
dirname :: FilePath -> FilePath
dirname = takeDirectory
|
9b0fc3d883e329ccaa58c1b654056334823cb0deccb5183237d41ebd48aa1a97 | csabahruska/jhc-components | TypeCheck.hs | module E.TypeCheck(
canBeBox,
eAp,
inferType,
infertype,
typecheck,
match,
sortSortLike,
sortKindLike,
sortTermLike,
sortTypeLike,
typeInfer,
typeInfer'
) where
import Control.Monad.Reader
import Control.Monad.Writer
import qualified Data.Map as Map
import Doc.DocLike
import Doc.PPrint
import Doc.Pretty
import E.E
import E.Eval(strong)
import E.Subst
import GenUtil
import Name.Id
import Name.Name
import Name.Names
import Support.CanType
import Util.ContextMonad
import Util.SetLike
import qualified Util.Seq as Seq
import {-# SOURCE #-} DataConstructors
import {-# SOURCE #-} E.Show
@Internals
# Jhc Core Type System
Jhc 's core is based on a pure type system . A pure type system ( also called a
PTS ) is actually a parameterized set of type systems . Jhc 's version is
described by the following .
Sorts = ( * , ! , * * , # , ( # ) , # # , □ )
Axioms = ( * :* * , # : # # , ! :* * , * * : □ , # # : □ )
-- sort kind
* is the kind of boxed values
! is the kind of boxed strict values
# is the kind of unboxed values
( # ) is the kind of unboxed tuples
-- sort superkind
* * is the superkind of all boxed value
# # is the superkind of all unboxed values
-- sort box
□ superkinds inhabit this
in addition there exist user defined kinds , which are always of supersort # #
The following Rules table shows what sort of abstractions are allowed , a rule
of the form ( A , B , C ) means you can have functions of things of sort A to things
of sort B and the result is something of sort C. _ Function _ in this context
subsumes both term and type level abstractions .
Notice that functions are always boxed , but may be strict if they take an
unboxed tuple as an argument . When a function is strict it means that it is
represented by a pointer to code directly , it can not be a suspended value that
evaluates to a function .
These type system rules apply to lambda abstractions . It is possible that data
constructors might exist that can not be given a type on their own with these
rules , even though when fully applied it has a well formed type . An example
would be unboxed tuples . This presents no difficulty as one concludes correctly
that it is a type error for these constructors to ever appear when not fully
saturated with arguments .
as a shortcut we will use * # to mean every combination involving * and # , and so forth .
for instance , ( * # , * # , * ) means the set ( * , * , * ) ( # , * , * ) ( * , # , * ) ( # , # , * )
Rules =
( * # ! , * # ! , * ) -- functions from values to values are boxed and lazy
( * # ! , ( # ) , * ) -- functions from values to unboxed tuples are boxed and lazy
( ( # ) , * # ! , ! ) -- functions from unboxed tuples to values are boxed and strict
( ( # ) , ( # ) , ! ) -- functions from unboxed tuples to unboxed tuples are boxed and strict
( * * , * , * ) -- may have a function from an unboxed type to a value
( * * , # , * )
( * * , ! , * )
( * * , * * , * * ) -- we have functions from types to types
( * * , # # , # # ) -- MutArray _ : : * - > #
( # # , # # , # # ) -- Complex _ : : # - > #
The defining feature of boxed values is
_ | _ : : t iff t : :*
This PTS is functional but not injective
The PTS can be considered stratified into the following levels
□ - sort box
* * , # # , - sort superkind
* , # , ( # ) , ! - sort kind
Int , Bits32_,Char - sort type
3,True,"bob " - sort value
# # On boxed kinds
The boxed kinds ( * and ! ) represent types that have a uniform run time
representation . Due to this , functions may be written that are polymorphic in types of these kinds .
Hence the rules of the form ( * * , ? , ? ) , allowing taking types of boxed kinds as arguments .
the unboxed kind # is inhabited with types that have their own specific run
time representation . Hence you can not write functions that are polymorphic in
unboxed types
# # On sort box , the unboxed tuple , and friends
Although sort box does not appear in the code , it is useful from a theoretical
point of view to talk about certain types such as the types of unboxed tuples .
tuples may have boxed and unboxed arguments , without sort box it would
be impossible to express this since it must be superkind polymorphic . sort box
allows one to express this as ( in the case of the unboxed 2 - tuple )
: □ : □ ∀k1 : s1 ∀k2 : s2 ∀t1 : : k2 . ( # t1 , t2 # )
However , although this is a valid typing of what it would mean if a unboxed
tuple were not fully applied , since we do not have any rules of form ( # # , ? , ? ) or
( □ , ? , ? ) this type obviously does not typecheck . Which is what enforces the
invarient that unboxed tuples are always fully applied , and is also why we do
not need a code representation of sort box .
# # # Do we need a superbox ?
You will notice that if you look at the axioms involving the sorts , you end up
with a disjoint graph
□ - the box
/ \
* * # # - superkind
/\ \
* ! # ( # ) - kind
This is simply due to the fact that nothing is polymorphic in unboxed tuples of
kind ( # ) so we never need to refer to any super - sorts of them . We can add sorts
( # # ) , ( □ ) and □ □ to fill in the gaps , but since these sorts will never appear in
code or discourse , we will ignore them from now on .
□ □ - sort superbox
/ \
□ ( □ ) - sort box
/ \ \
* * # # ( # # ) - sort superkind
/\ \ |
* ! # ( # ) - sort kind
# Jhc Core Type System
Jhc's core is based on a pure type system. A pure type system (also called a
PTS) is actually a parameterized set of type systems. Jhc's version is
described by the following.
Sorts = (*, !, **, #, (#), ##, □)
Axioms = (*:**, #:##, !:**, **:□, ##:□)
-- sort kind
* is the kind of boxed values
! is the kind of boxed strict values
# is the kind of unboxed values
(#) is the kind of unboxed tuples
-- sort superkind
** is the superkind of all boxed value
## is the superkind of all unboxed values
-- sort box
□ superkinds inhabit this
in addition there exist user defined kinds, which are always of supersort ##
The following Rules table shows what sort of abstractions are allowed, a rule
of the form (A,B,C) means you can have functions of things of sort A to things
of sort B and the result is something of sort C. _Function_ in this context
subsumes both term and type level abstractions.
Notice that functions are always boxed, but may be strict if they take an
unboxed tuple as an argument. When a function is strict it means that it is
represented by a pointer to code directly, it cannot be a suspended value that
evaluates to a function.
These type system rules apply to lambda abstractions. It is possible that data
constructors might exist that cannot be given a type on their own with these
rules, even though when fully applied it has a well formed type. An example
would be unboxed tuples. This presents no difficulty as one concludes correctly
that it is a type error for these constructors to ever appear when not fully
saturated with arguments.
as a shortcut we will use *# to mean every combination involving * and #, and so forth.
for instance, (*#,*#,*) means the set (*,*,*) (#,*,*) (*,#,*) (#,#,*)
Rules =
(*#!,*#!,*) -- functions from values to values are boxed and lazy
(*#!,(#),*) -- functions from values to unboxed tuples are boxed and lazy
((#),*#!,!) -- functions from unboxed tuples to values are boxed and strict
((#),(#),!) -- functions from unboxed tuples to unboxed tuples are boxed and strict
(**,*,*) -- may have a function from an unboxed type to a value
(**,#,*)
(**,!,*)
(**,**,**) -- we have functions from types to types
(**,##,##) -- MutArray_ :: * -> #
(##,##,##) -- Complex_ :: # -> #
The defining feature of boxed values is
_|_ :: t iff t::*
This PTS is functional but not injective
The PTS can be considered stratified into the following levels
□ - sort box
**,##, - sort superkind
*,#,(#),! - sort kind
Int,Bits32_,Char - sort type
3,True,"bob" - sort value
## On boxed kinds
The boxed kinds (* and !) represent types that have a uniform run time
representation. Due to this, functions may be written that are polymorphic in types of these kinds.
Hence the rules of the form (**,?,?), allowing taking types of boxed kinds as arguments.
the unboxed kind # is inhabited with types that have their own specific run
time representation. Hence you cannot write functions that are polymorphic in
unboxed types
## On sort box, the unboxed tuple, and friends
Although sort box does not appear in the code, it is useful from a theoretical
point of view to talk about certain types such as the types of unboxed tuples.
Unboxed tuples may have boxed and unboxed arguments, without sort box it would
be impossible to express this since it must be superkind polymorphic. sort box
allows one to express this as (in the case of the unboxed 2-tuple)
∀s1:□ ∀s2:□ ∀k1:s1 ∀k2:s2 ∀t1:k1 ∀t2:k2 . (# t1, t2 #)
However, although this is a valid typing of what it would mean if a unboxed
tuple were not fully applied, since we do not have any rules of form (##,?,?) or
(□,?,?) this type obviously does not typecheck. Which is what enforces the
invarient that unboxed tuples are always fully applied, and is also why we do
not need a code representation of sort box.
### Do we need a superbox?
You will notice that if you look at the axioms involving the sorts, you end up
with a disjoint graph
□ - the box
/ \
** ## - superkind
/\ \
* ! # (#) - kind
This is simply due to the fact that nothing is polymorphic in unboxed tuples of
kind (#) so we never need to refer to any super-sorts of them. We can add sorts
(##),(□) and □□ to fill in the gaps, but since these sorts will never appear in
code or discourse, we will ignore them from now on.
□□ - sort superbox
/ \
□ (□) - sort box
/ \ \
** ## (##) - sort superkind
/\ \ |
* ! # (#) - sort kind
-}
ptsAxioms :: Map.Map ESort ESort
ptsAxioms = Map.fromList [
(EStar,EStarStar),
(EBang,EStarStar),
(EHash,EHashHash),
(ETuple,EHashHash)
]
ptsRulesMap :: Map.Map (ESort,ESort) ESort
ptsRulesMap = Map.fromList [ ((a,b),c) | (as,bs,c) <- ptsRules, a <- as, b <- bs ] where
starHashBang = [EStar,EHash,EBang]
ptsRules = [
(starHashBang,ETuple:starHashBang,EStar),
([ETuple],ETuple:starHashBang,EBang),
([EStarStar],starHashBang,EStar),
([EStarStar],[EStarStar],EStarStar),
([EStarStar],[EHashHash],EHashHash),
([EHashHash],[EHashHash],EHashHash)
]
canBeBox x | getType (getType x) == ESort EStarStar = True
canBeBox _ = False
tBox = mktBox eStar
monadicLookup key m = case Map.lookup key m of
Just x -> return x
Nothing -> fail "Key not found"
-- Fast (and lazy, and perhaps unsafe) typeof
instance CanType E where
type TypeOf E = E
getType (ESort s) = ESort $ getType s
getType (ELit l) = getType l
getType (EVar v) = getType v
getType e@(EPi TVr { tvrType = a } b)
| isUnknown typa || isUnknown typb = Unknown
| otherwise = maybe (error $ "E.TypeCheck.getType: " ++ show (e,getType a,getType b)) ESort $ do
ESort s1 <- return $ getType a
ESort s2 <- return $ getType b
monadicLookup (s1,s2) ptsRulesMap
where typa = getType a; typb = getType b
getType (EAp (ELit LitCons { litType = EPi tvr a }) b) = getType (subst tvr b a)
getType (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = getType (foldl eAp af (litArgs lc ++ [b]))
getType (EAp (EPi tvr a) b) = getType (subst tvr b a)
getType e@(EAp a b) = ans where
ans = if isUnknown typa then Unknown else if a == tBox || typa == tBox then tBox else (case a of
(ELit LitCons {}) -> error $ "getType: application of type alias " ++ (render $ parens $ ePretty e)
_ -> eAp typa b)
typa = getType a
getType (ELam (TVr { tvrIdent = x, tvrType = a}) b) = EPi (tVr x a) (getType b)
getType (ELetRec _ e) = getType e
getType ECase {eCaseType = ty} = ty
getType (EError _ e) = e
getType (EPrim _ _ t) = t
getType Unknown = Unknown
instance CanType ESort where
    type TypeOf ESort = ESort
    -- user-defined kinds always live in ##; base sorts come from the axioms
    getType (ESortNamed _) = EHashHash
    getType s = case Map.lookup s ptsAxioms of
        Just s' -> s'
        Nothing -> error $ "getType: " ++ show s

instance CanType TVr where
    type TypeOf TVr = E
    getType = tvrType

instance CanType (Lit x t) where
    type TypeOf (Lit x t) = t
    getType l = litType l

instance CanType e => CanType (Alt e) where
    type TypeOf (Alt e) = TypeOf e
    getType (Alt _ e) = getType e
-- | True for the superkind sorts (** and ##).
sortSortLike (ESort s) = isEHashHash s || isEStarStar s
sortSortLike _ = False
-- | True for terms at the kind level: a sort that is not a superkind, or a
-- term whose type is superkind-like.
sortKindLike (ESort s) = not (isEHashHash s) && not (isEStarStar s)
sortKindLike e = sortSortLike (getType e)
-- | True for terms at the type level, i.e. whose type is kind-like.
sortTypeLike ESort {} = False
sortTypeLike e = sortKindLike (getType e)
-- | True for terms at the value level, i.e. whose type is type-like.
sortTermLike ESort {} = False
sortTermLike e = sortTypeLike (getType e)
-- Render a pretty document and push it onto the checking context.
withContextDoc s a = withContext (render s) a
-- | Perform a full typecheck, evaluating type terms as necessary.
inferType :: (ContextMonad m, ContextOf m ~ String) => DataTable -> [(TVr,E)] -> E -> m E
inferType dataTable ds e = rfc e where
    inferType' ds e = inferType dataTable ds e
    prettyE = ePretty
    rfc e = withContextDoc (text "fullCheck:" </> prettyE e) (fc e >>= strong')
    rfc' nds e = withContextDoc (text "fullCheck':" </> prettyE e) (inferType' nds e)
    strong' e = withContextDoc (parens $ text "Strong:" </> prettyE e) $ strong ds e
    fc s@(ESort _) = return $ getType s
    fc (ELit lc@LitCons {}) | let lc' = updateLit dataTable lc, litAliasFor lc /= litAliasFor lc' = fail $ "Alias not correct: " ++ show (lc, litAliasFor lc')
    fc (ELit LitCons { litName = n, litArgs = es, litType = t}) | nameType n == TypeConstructor, Just _ <- fromUnboxedNameTuple n = do
        withContext ("Checking Unboxed Tuple: " ++ show n) $ do
            -- we omit kind checking for unboxed tuples
            valid t
            es' <- mapM rfc es
            strong' t
    fc e@(ELit LitCons { litName = n, litArgs = es, litType = t}) = do
        withContext ("Checking Constructor: " ++ show e) $ do
            valid t
            es' <- mapM rfc es
            t' <- strong' t
            let sts = slotTypes dataTable n t
                les = length es
                lsts = length sts
            withContext ("Checking Args: " ++ show (sts,es')) $ do
                -- a constructor may be under-applied only if its type is a pi
                unless (les == lsts || (les < lsts && isEPi t')) $ do
                    fail "constructor with wrong number of arguments"
                zipWithM_ eq sts es'
            return t'
    fc e@(ELit _) = let t = getType e in valid t >> return t
    fc (EVar (TVr { tvrIdent = eid })) | eid == emptyId = fail "variable with nothing!"
    fc (EVar (TVr { tvrType = t})) = valid t >> strong' t
    fc (EPi (TVr { tvrIdent = n, tvrType = at}) b) = do
        ESort a <- rfc at
        ESort b <- rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
        liftM ESort $ monadicLookup (a,b) ptsRulesMap
        --valid at >> rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
    --fc (ELam tvr@(TVr n at) b) = valid at >> rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b >>= \b' -> (strong' $ EPi tvr b')
    fc (ELam tvr@(TVr { tvrIdent = n, tvrType = at}) b) = do
        withContext "Checking Lambda" $ do
            valid at
            b' <- withContext "Checking Lambda Body" $ rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
            withContext "Checking lambda pi" $ strong' $ EPi tvr b'
    fc (EAp (EPi tvr e) b) = rfc (subst tvr b e)
    fc (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = rfc (EAp (foldl eAp af (litArgs lc)) b)
    fc (EAp a b) = do
        withContextDoc (text "EAp:" </> parens (prettyE a) </> parens (prettyE b)) $ do
            a' <- rfc a
            if a' == tBox then return tBox else strong' (eAp a' b)
    fc (ELetRec vs e) = do
        let ck (TVr { tvrIdent = eid },_) | eid == emptyId = fail "binding of empty var"
            ck (tv@(TVr { tvrType = t}),e) = withContextDoc (hsep [text "Checking Let: ", parens (pprint tv),text " = ", parens $ prettyE e ]) $ do
                when (getType t == eHash && not (isEPi t)) $ fail $ "Let binding unboxed value: " ++ show (tv,e)
                valid' nds t
                fceq nds e t
            nds = vs ++ ds
        mapM_ ck vs
        when (hasRepeatUnder (tvrIdent . fst) vs) $ fail "Repeat Variable in ELetRec"
        inferType' nds e
        --et <- inferType' nds e
        --strong nds et
    fc (EError _ e) = valid e >> (strong' e)
    fc (EPrim _ ts t) = mapM_ valid ts >> valid t >> ( strong' t)
    -- TODO - this is a hack to get around case of constants.
    -- NOTE(review): this clause head was corrupted in this copy of the file
    -- and has been reconstructed; verify against upstream jhc E/TypeCheck.hs.
    fc ec@ECase { eCaseScrutinee = e@ELit {}, eCaseBind = b, eCaseType = dt } | sortTypeLike e = do
        withContext "Checking typelike pattern binding case" $ do
            et <- rfc e
            withContext "Checking typelike default binding" $ eq et (getType b)
            verifyPats (casePats ec)
            -- skip checking alternatives
            ps <- mapM (strong' . getType) $ casePats ec
            withContext "Checking typelike pattern equality" $ eqAll (et:ps)
            strong' dt
    fc ec@ECase {eCaseScrutinee = e, eCaseBind = b, eCaseAlts = as, eCaseType = dt } | sortTypeLike e = do -- TODO - we should substitute the tested for value into the default type.
        withContext "Checking typelike binding case" $ do
            et <- rfc e
            withContext "Checking typelike default binding" $ eq et (getType b)
            --dt <- rfc d
            --bs <- mapM rfc (caseBodies ec) -- these should be specializations of dt
            withContext "Checking typelike alternatives" $ mapM_ (calt e) as
            --eqAll bs
            verifyPats (casePats ec)
            ps <- withContext "Getting pattern types" $ mapM (strong' . getType) $ casePats ec
            withContext "checking typelike pattern equality" $ eqAll (et:ps)
            withContext "Evaluating Case Type" $ strong' dt
    fc ec@ECase { eCaseScrutinee = e, eCaseBind = b } = do
        withContext "Checking plain case" $ do
            et <- rfc e
            withContext "Checking default binding" $ eq et (getType b)
            bs <- withContext "Checking case bodies" $ mapM rfc (caseBodies ec)
            ect <- strong' (eCaseType ec)
            withContext "Checking case bodies have equal types" $ eqAll (ect:bs)
            verifyPats (casePats ec)
            ps <- mapM (strong' . getType) $ casePats ec
            withContext "checking pattern equality" $ eqAll (et:ps)
            return ect
    fc Unknown = return Unknown
    --fc e = failDoc $ text "what's this? " </> (prettyE e)
    -- check a type-level alternative, substituting the matched pattern for
    -- the scrutinized variable when possible
    calt (EVar v) (Alt l e) = do
        let nv = followAliases undefined (patToLitEE l)
        rfc (subst' v nv e)
    calt _ (Alt _ e) = rfc e
    -- patterns must be mutually distinct and individually linear
    verifyPats xs = do
        mapM_ verifyPats' xs
        when (hasRepeatUnder litHead xs) $ fail "Duplicate case alternatives"
    verifyPats' LitCons { litArgs = xs } = when (hasRepeatUnder id (filter (/= emptyId) $ map tvrIdent xs)) $ fail "Case pattern is non-linear"
    verifyPats' _ = return ()
    eqAll ts = withContextDoc (text "eqAll" </> list (map prettyE ts)) $ foldl1M_ eq ts
    valid s = valid' ds s
    valid' nds ESort {} = return ()
    valid' nds s
        | Unknown <- s = return ()
        | otherwise = withContextDoc (text "valid:" <+> prettyE s) (do t <- inferType' nds s; valid' nds t)
    -- equality of types, treating the conjured box as a wildcard
    eq box t2 | boxCompat box t2 = return t2
    eq t1 box | boxCompat box t1 = return t1
    --eq box t2 | box == tBox, canBeBox t2 = return t2
    --eq t1 box | box == tBox, canBeBox t1 = return t1
    eq Unknown t2 = return t2
    eq t1 Unknown = return t1
    eq t1 t2 = eq' ds t1 t2
    eq' nds t1 t2 = do
        e1 <- strong nds (t1)
        e2 <- strong nds (t2)
        case typesCompatable e1 e2 of
            Just () -> return (e1)
            Nothing -> failDoc $ text "eq:" <+> align $ vcat [ prettyE (e1), prettyE (e2) ]
    fceq nds e1 t2 = do
        withContextDoc (hsep [text "fceq:", align $ vcat [parens $ prettyE e1, parens $ prettyE t2]]) $ do
            t1 <- inferType' nds e1
            eq' nds t1 t2
    boxCompat (ELit (LitCons { litName = n })) t | Just e <- fromConjured modBox n = e == getType t
    boxCompat _ _ = False
-- This should perform a full typecheck and may take any extra information needed as an extra parameter
class CanTypeCheck a where
    typecheck :: Monad m => DataTable -> a -> m E

-- | Run 'typecheck' in an @Either String@-like monad, calling 'error' on
-- failure.
infertype :: CanTypeCheck a => DataTable -> a -> E
infertype env a = case typecheck env a of
    Left s -> error $ "infertype: " ++ s
    Right x -> x

instance CanTypeCheck E where
    typecheck dataTable e = case runContextEither $ typeInfer'' dataTable [] e of
        Left ss -> fail $ "\n>>> internal error:\n" ++ unlines ss
        Right v -> return v

instance CanTypeCheck TVr where
    typecheck dt tvr = do
        typecheck dt (getType tvr)
        return $ getType tvr

instance CanTypeCheck (Lit a E) where
    typecheck dt LitCons { litType = t } = typecheck dt t >> return t
    typecheck dt LitInt { litType = t } = typecheck dt t >> return t

-- TODO, types might be bound in scrutinization
instance CanTypeCheck (Alt E) where
    typecheck dt (Alt l e) = typecheck dt l >> typecheck dt e
-- | Determine type of term using full algorithm with substitutions. This
-- should be used instead of 'typ' when let-bound type variables exist or you
-- wish a more thorough checking of types.
typeInfer :: DataTable -> E -> E
typeInfer dataTable e = case runContextEither $ typeInfer'' dataTable [] e of
    Left ss -> error $ "\n>>> internal error:\n" ++ unlines (tail ss)
    Right v -> v

-- | Like 'typeInfer', but with an initial environment of let-bound
-- definitions.
typeInfer' :: DataTable -> [(TVr,E)] -> E -> E
typeInfer' dataTable ds e = case runContextEither $ typeInfer'' dataTable ds e of
    Left ss -> error $ "\n>>> internal error:\n" ++ unlines (tail ss)
    Right v -> v
-- | Environment carried by the 'Tc' typechecking monad.
data TcEnv = TcEnv {
    --tcDefns :: [(TVr,E)],
    tcContext :: [String]
    --tcDataTable :: DataTable
    }

-- Record-update helper: apply a function to the 'tcContext' field.
tcContext_u f r@TcEnv{tcContext = x} = r{tcContext = f x}

-- | Reader monad over the typechecking environment.
newtype Tc a = Tc (Reader TcEnv a)
    deriving(Monad,Functor,MonadReader TcEnv)

instance ContextMonad Tc where
    type ContextOf Tc = String
    -- contexts stack innermost-first
    withContext s = local (tcContext_u (s:))
{- NOTE(review): dead experimental code, kept commented out.  It references
   'tcDefns'/'tcDefns_u', which are commented out of 'TcEnv' above, so it
   cannot compile as written.  (The opening comment bracket and a garbled
   duplicate of this block were repaired during review.)
tcE :: E -> Tc E
tcE e = rfc e where
    rfc e = withContextDoc (text "tcE:" </> ePretty e) (fc e >>= strong')
    strong' e = do
        ds <- asks tcDefns
        withContextDoc (text "tcE.strong:" </> ePretty e) $ strong ds e
    fc s@ESort {} = return $ getType s
    fc (ELit LitCons { litType = t }) = strong' t
    fc e@ELit {} = strong' (getType e)
    fc (EVar TVr { tvrIdent = eid }) | eid == emptyId = fail "variable with nothing!"
    fc (EVar TVr { tvrType = t}) = strong' t
    fc (EPi TVr { tvrIdent = n, tvrType = at} b) = do
        ESort a <- rfc at
        ESort b <- local (tcDefns_u (\ds -> [ d | d@(v,_) <- ds, tvrIdent v /= n ])) $ rfc b
        liftM ESort $ monadicLookup (a,b) ptsRulesMap
    fc (ELam tvr@TVr { tvrIdent = n, tvrType = at} b) = do
        at' <- strong' at
        b' <- local (tcDefns_u (\ds -> [ d | d@(v,_) <- ds, tvrIdent v /= n ])) $ rfc b
        return (EPi (tVr n at') b')
    fc (EAp (EPi tvr e) b) = do
        b <- strong' b
        rfc (subst tvr b e)
    fc (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = fc (EAp (foldl eAp af (litArgs lc)) b)
    fc (EAp a b) = do
        a' <- rfc a
        if a' == tBox then return tBox else strong' (eAp a' b)
    fc (ELetRec vs e) = local (tcDefns_u (vs ++)) $ rfc e
    fc (EError _ e) = strong' e
    fc (EPrim _ ts t) = strong' t
    fc ECase { eCaseType = ty } = do
        strong' ty
    fc Unknown = return Unknown
    fc e = failDoc $ text "what's this? " </> (ePretty e)
-}
-- | Fast type inference: determines the type of a term with substitution
-- ('strong') but without the full validity checking done by 'inferType'.
typeInfer'' :: (ContextMonad m, ContextOf m ~ String) => DataTable -> [(TVr,E)] -> E -> m E
typeInfer'' dataTable ds e = rfc e where
    inferType' ds e = typeInfer'' dataTable ds e
    rfc e = withContextDoc (text "fullCheck':" </> ePretty e) (fc e >>= strong')
    rfc' nds e = withContextDoc (text "fullCheck':" </> ePretty e) (inferType' nds e)
    strong' e = withContextDoc (text "Strong':" </> ePretty e) $ strong ds e
    fc s@ESort {} = return $ getType s
    fc (ELit LitCons { litType = t }) = strong' t
    fc e@ELit {} = strong' (getType e)
    fc (EVar TVr { tvrIdent = eid }) | eid == emptyId = fail "variable with nothing!"
    fc (EVar TVr { tvrType = t}) = strong' t
    fc (EPi TVr { tvrIdent = n, tvrType = at} b) = do
        ESort a <- rfc at
        ESort b <- rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
        liftM ESort $ monadicLookup (a,b) ptsRulesMap
    fc (ELam tvr@TVr { tvrIdent = n, tvrType = at} b) = do
        at' <- strong' at
        b' <- rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
        return (EPi (tVr n at') b')
    fc (EAp (EPi tvr e) b) = do
        b <- strong' b
        rfc (subst tvr b e)
    fc (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = fc (EAp (foldl eAp af (litArgs lc)) b)
    fc (EAp a b) = do
        a' <- rfc a
        if a' == tBox then return tBox else strong' (eAp a' b)
    fc (ELetRec vs e) = do
        let nds = vs ++ ds
        --et <- inferType' nds e
        --strong nds et
        inferType' nds e
    fc (EError _ e) = strong' e
    fc (EPrim _ ts t) = strong' t
    fc ECase { eCaseType = ty } = do
        strong' ty
    fc Unknown = return Unknown
    --fc e = failDoc $ text "what's this? " </> (ePretty e)
-- | find substitution that will transform the left term into the right one,
-- only substituting for the vars in the list
match :: Monad m =>
    (Id -> Maybe E)   -- ^ function to look up values in the environment
    -> [TVr]          -- ^ vars which may be substituted
    -> E              -- ^ pattern to match
    -> E              -- ^ input expression
    -> m [(TVr,E)]
match lup vs = \e1 e2 -> liftM Seq.toList $ execWriterT (un e1 e2 etherealIds) where
    bvs :: IdSet
    bvs = fromList (map tvrIdent vs)
    -- 'un' walks both terms in parallel; 'c' is a supply of fresh ethereal
    -- ids used to alpha-rename binders so bodies can be compared.
    un (EAp a b) (EAp a' b') c = do
        un a a' c
        un b b' c
    un (ELam va ea) (ELam vb eb) c = lam va ea vb eb c
    un (EPi va ea) (EPi vb eb) c = lam va ea vb eb c
    un (EPi va ea) (ELit LitCons { litName = ar, litArgs = [x,y], litType = lt}) c | ar == tc_Arrow = do
        un (tvrType va) x c
        un ea y c
    un (EPrim s xs t) (EPrim s' ys t') c | length xs == length ys = do
        sequence_ [ un x y c | x <- xs | y <- ys]
        un t t' c
    un (ESort x) (ESort y) c | x == y = return ()
    un (ELit (LitInt x t1)) (ELit (LitInt y t2)) c | x == y = un t1 t2 c
    un (ELit LitCons { litName = n, litArgs = xs, litType = t }) (ELit LitCons { litName = n', litArgs = ys, litType = t'}) c | n == n' && length xs == length ys = do
        sequence_ [ un x y c | x <- xs | y <- ys]
        un t t' c
    un (EVar TVr { tvrIdent = i, tvrType = t}) (EVar TVr {tvrIdent = j, tvrType = u}) c | i == j = un t u c
    un (EVar TVr { tvrIdent = i, tvrType = t}) (EVar TVr {tvrIdent = j, tvrType = u}) c | isEtherealId i || isEtherealId j = fail "Expressions don't match"
    -- peel the last constructor argument off to match an application
    un (EAp a b) (ELit lc@LitCons { litArgs = bas@(_:_), litType = t }) c = do
        let (al:as) = reverse bas
        un a (ELit lc { litArgs = reverse as, litType = ePi tvr { tvrType = getType al } t }) c
        un b al c
    un (EAp a b) (EPi TVr { tvrType = a1 } a2) c = do
        un a (ELit litCons { litArgs = [a1], litName = tc_Arrow, litType = EPi tvr { tvrType = getType a2 } (getType a1) }) c
        un b a2 c
    un (EVar tvr@TVr { tvrIdent = i, tvrType = t}) b c
        | i `member` bvs = tell (Seq.singleton (tvr,b))
        | otherwise = fail $ "Expressions do not unify: " ++ show tvr ++ show b
    un a (EVar tvr) c | Just b <- lup (tvrIdent tvr), not $ isEVar b = un a b c
    un a b c | Just a' <- followAlias undefined a = un a' b c
    un a b c | Just b' <- followAlias undefined b = un a b' c
    un a b _ = fail $ "Expressions do not unify: " ++ show a ++ show b
    -- match under a binder: unify the binder types, then the bodies with
    -- both binders renamed to the same fresh id.
    lam va ea vb eb (c:cs) = do
        un (tvrType va) (tvrType vb) (c:cs)
        un (subst va (EVar va { tvrIdent = c }) ea) (subst vb (EVar vb { tvrIdent = c }) eb) cs
    lam _ _ _ _ _ = error "TypeCheck.match: bad."
| null | https://raw.githubusercontent.com/csabahruska/jhc-components/a7dace481d017f5a83fbfc062bdd2d099133adf1/jhc-core/src/E/TypeCheck.hs | haskell | # SOURCE #
# SOURCE #
sort kind
sort superkind
sort box
functions from values to values are boxed and lazy
functions from values to unboxed tuples are boxed and lazy
functions from unboxed tuples to values are boxed and strict
functions from unboxed tuples to unboxed tuples are boxed and strict
may have a function from an unboxed type to a value
we have functions from types to types
MutArray _ : : * - > #
Complex _ : : # - > #
sort kind
sort superkind
sort box
functions from values to values are boxed and lazy
functions from values to unboxed tuples are boxed and lazy
functions from unboxed tuples to values are boxed and strict
functions from unboxed tuples to unboxed tuples are boxed and strict
may have a function from an unboxed type to a value
we have functions from types to types
MutArray_ :: * -> #
Complex_ :: # -> #
Fast (and lazy, and perhaps unsafe) typeof
| Perform a full typecheck, evaluating type terms as necessary.
we omit kind checking for unboxed tuples
valid at >> rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
et <- inferType' nds e
skip checking alternatives
TODO - we should substitute the tested for value into the default type.
dt <- rfc d
these should be specializations of dt
eqAll bs
fc e = failDoc $ text "what's this? " </> (prettyE e)
This should perform a full typecheck and may take any extra information needed as an extra parameter
| Determine type of term using full algorithm with substitutions. This
should be used instead of 'typ' when let-bound type variables exist or you
wish a more thorough checking of types.
tcDefns :: [(TVr,E)],
tcDataTable :: DataTable
et <- inferType' nds e
fc e = failDoc $ text "what's this? " </> (ePretty e)
| find substitution that will transform the left term into the right one,
only substituting for the vars in the list
^ function to look up values in the environment
^ vars which may be substituted
^ pattern to match
^ input expression | module E.TypeCheck(
canBeBox,
eAp,
inferType,
infertype,
typecheck,
match,
sortSortLike,
sortKindLike,
sortTermLike,
sortTypeLike,
typeInfer,
typeInfer'
) where
import Control.Monad.Reader
import Control.Monad.Writer
import qualified Data.Map as Map
import Doc.DocLike
import Doc.PPrint
import Doc.Pretty
import E.E
import E.Eval(strong)
import E.Subst
import GenUtil
import Name.Id
import Name.Name
import Name.Names
import Support.CanType
import Util.ContextMonad
import Util.SetLike
import qualified Util.Seq as Seq
@Internals
# Jhc Core Type System
Jhc 's core is based on a pure type system . A pure type system ( also called a
PTS ) is actually a parameterized set of type systems . Jhc 's version is
described by the following .
Sorts = ( * , ! , * * , # , ( # ) , # # , □ )
Axioms = ( * :* * , # : # # , ! :* * , * * : □ , # # : □ )
* is the kind of boxed values
! is the kind of boxed strict values
# is the kind of unboxed values
( # ) is the kind of unboxed tuples
* * is the superkind of all boxed value
# # is the superkind of all unboxed values
□ superkinds inhabit this
in addition there exist user defined kinds , which are always of supersort # #
The following Rules table shows what sort of abstractions are allowed , a rule
of the form ( A , B , C ) means you can have functions of things of sort A to things
of sort B and the result is something of sort C. _ Function _ in this context
subsumes both term and type level abstractions .
Notice that functions are always boxed , but may be strict if they take an
unboxed tuple as an argument . When a function is strict it means that it is
represented by a pointer to code directly , it can not be a suspended value that
evaluates to a function .
These type system rules apply to lambda abstractions . It is possible that data
constructors might exist that can not be given a type on their own with these
rules , even though when fully applied it has a well formed type . An example
would be unboxed tuples . This presents no difficulty as one concludes correctly
that it is a type error for these constructors to ever appear when not fully
saturated with arguments .
as a shortcut we will use * # to mean every combination involving * and # , and so forth .
for instance , ( * # , * # , * ) means the set ( * , * , * ) ( # , * , * ) ( * , # , * ) ( # , # , * )
Rules =
( * * , # , * )
( * * , ! , * )
The defining feature of boxed values is
_ | _ : : t iff t : :*
This PTS is functional but not injective
The PTS can be considered stratified into the following levels
□ - sort box
* * , # # , - sort superkind
* , # , ( # ) , ! - sort kind
Int , Bits32_,Char - sort type
3,True,"bob " - sort value
# # On boxed kinds
The boxed kinds ( * and ! ) represent types that have a uniform run time
representation . Due to this , functions may be written that are polymorphic in types of these kinds .
Hence the rules of the form ( * * , ? , ? ) , allowing taking types of boxed kinds as arguments .
the unboxed kind # is inhabited with types that have their own specific run
time representation . Hence you can not write functions that are polymorphic in
unboxed types
# # On sort box , the unboxed tuple , and friends
Although sort box does not appear in the code , it is useful from a theoretical
point of view to talk about certain types such as the types of unboxed tuples .
tuples may have boxed and unboxed arguments , without sort box it would
be impossible to express this since it must be superkind polymorphic . sort box
allows one to express this as ( in the case of the unboxed 2 - tuple )
: □ : □ ∀k1 : s1 ∀k2 : s2 ∀t1 : : k2 . ( # t1 , t2 # )
However , although this is a valid typing of what it would mean if a unboxed
tuple were not fully applied , since we do not have any rules of form ( # # , ? , ? ) or
( □ , ? , ? ) this type obviously does not typecheck . Which is what enforces the
invariant that unboxed tuples are always fully applied, and is also why we do
not need a code representation of sort box .
# # # Do we need a superbox ?
You will notice that if you look at the axioms involving the sorts , you end up
with a disjoint graph
□ - the box
/ \
* * # # - superkind
/\ \
* ! # ( # ) - kind
This is simply due to the fact that nothing is polymorphic in unboxed tuples of
kind ( # ) so we never need to refer to any super - sorts of them . We can add sorts
( # # ) , ( □ ) and □ □ to fill in the gaps , but since these sorts will never appear in
code or discourse , we will ignore them from now on .
□ □ - sort superbox
/ \
□ ( □ ) - sort box
/ \ \
* * # # ( # # ) - sort superkind
/\ \ |
* ! # ( # ) - sort kind
# Jhc Core Type System
Jhc's core is based on a pure type system. A pure type system (also called a
PTS) is actually a parameterized set of type systems. Jhc's version is
described by the following.
Sorts = (*, !, **, #, (#), ##, □)
Axioms = (*:**, #:##, !:**, **:□, ##:□)
* is the kind of boxed values
! is the kind of boxed strict values
# is the kind of unboxed values
(#) is the kind of unboxed tuples
** is the superkind of all boxed value
## is the superkind of all unboxed values
□ superkinds inhabit this
in addition there exist user defined kinds, which are always of supersort ##
The following Rules table shows what sort of abstractions are allowed, a rule
of the form (A,B,C) means you can have functions of things of sort A to things
of sort B and the result is something of sort C. _Function_ in this context
subsumes both term and type level abstractions.
Notice that functions are always boxed, but may be strict if they take an
unboxed tuple as an argument. When a function is strict it means that it is
represented by a pointer to code directly, it cannot be a suspended value that
evaluates to a function.
These type system rules apply to lambda abstractions. It is possible that data
constructors might exist that cannot be given a type on their own with these
rules, even though when fully applied it has a well formed type. An example
would be unboxed tuples. This presents no difficulty as one concludes correctly
that it is a type error for these constructors to ever appear when not fully
saturated with arguments.
as a shortcut we will use *# to mean every combination involving * and #, and so forth.
for instance, (*#,*#,*) means the set (*,*,*) (#,*,*) (*,#,*) (#,#,*)
Rules =
(**,#,*)
(**,!,*)
The defining feature of boxed values is
_|_ :: t iff t::*
This PTS is functional but not injective
The PTS can be considered stratified into the following levels
□ - sort box
**,##, - sort superkind
*,#,(#),! - sort kind
Int,Bits32_,Char - sort type
3,True,"bob" - sort value
## On boxed kinds
The boxed kinds (* and !) represent types that have a uniform run time
representation. Due to this, functions may be written that are polymorphic in types of these kinds.
Hence the rules of the form (**,?,?), allowing taking types of boxed kinds as arguments.
the unboxed kind # is inhabited with types that have their own specific run
time representation. Hence you cannot write functions that are polymorphic in
unboxed types
## On sort box, the unboxed tuple, and friends
Although sort box does not appear in the code, it is useful from a theoretical
point of view to talk about certain types such as the types of unboxed tuples.
Unboxed tuples may have boxed and unboxed arguments, without sort box it would
be impossible to express this since it must be superkind polymorphic. sort box
allows one to express this as (in the case of the unboxed 2-tuple)
∀s1:□ ∀s2:□ ∀k1:s1 ∀k2:s2 ∀t1:k1 ∀t2:k2 . (# t1, t2 #)
However, although this is a valid typing of what it would mean if a unboxed
tuple were not fully applied, since we do not have any rules of form (##,?,?) or
(□,?,?) this type obviously does not typecheck. Which is what enforces the
invariant that unboxed tuples are always fully applied, and is also why we do
not need a code representation of sort box.
### Do we need a superbox?
You will notice that if you look at the axioms involving the sorts, you end up
with a disjoint graph
□ - the box
/ \
** ## - superkind
/\ \
* ! # (#) - kind
This is simply due to the fact that nothing is polymorphic in unboxed tuples of
kind (#) so we never need to refer to any super-sorts of them. We can add sorts
(##),(□) and □□ to fill in the gaps, but since these sorts will never appear in
code or discourse, we will ignore them from now on.
□□ - sort superbox
/ \
□ (□) - sort box
/ \ \
** ## (##) - sort superkind
/\ \ |
* ! # (#) - sort kind
-}
-- | The axioms of the pure type system: each base sort mapped to the sort
-- that it inhabits (*:**, !:**, #:##, (#):##).
ptsAxioms :: Map.Map ESort ESort
ptsAxioms = Map.fromList [
    (EStar,EStarStar),
    (EBang,EStarStar),
    (EHash,EHashHash),
    (ETuple,EHashHash)
    ]
ptsRulesMap :: Map.Map (ESort,ESort) ESort
ptsRulesMap = Map.fromList [ ((a,b),c) | (as,bs,c) <- ptsRules, a <- as, b <- bs ] where
starHashBang = [EStar,EHash,EBang]
ptsRules = [
(starHashBang,ETuple:starHashBang,EStar),
([ETuple],ETuple:starHashBang,EBang),
([EStarStar],starHashBang,EStar),
([EStarStar],[EStarStar],EStarStar),
([EStarStar],[EHashHash],EHashHash),
([EHashHash],[EHashHash],EHashHash)
]
canBeBox x | getType (getType x) == ESort EStarStar = True
canBeBox _ = False
tBox = mktBox eStar
monadicLookup key m = case Map.lookup key m of
Just x -> return x
Nothing -> fail "Key not found"
instance CanType E where
type TypeOf E = E
getType (ESort s) = ESort $ getType s
getType (ELit l) = getType l
getType (EVar v) = getType v
getType e@(EPi TVr { tvrType = a } b)
| isUnknown typa || isUnknown typb = Unknown
| otherwise = maybe (error $ "E.TypeCheck.getType: " ++ show (e,getType a,getType b)) ESort $ do
ESort s1 <- return $ getType a
ESort s2 <- return $ getType b
monadicLookup (s1,s2) ptsRulesMap
where typa = getType a; typb = getType b
getType (EAp (ELit LitCons { litType = EPi tvr a }) b) = getType (subst tvr b a)
getType (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = getType (foldl eAp af (litArgs lc ++ [b]))
getType (EAp (EPi tvr a) b) = getType (subst tvr b a)
getType e@(EAp a b) = ans where
ans = if isUnknown typa then Unknown else if a == tBox || typa == tBox then tBox else (case a of
(ELit LitCons {}) -> error $ "getType: application of type alias " ++ (render $ parens $ ePretty e)
_ -> eAp typa b)
typa = getType a
getType (ELam (TVr { tvrIdent = x, tvrType = a}) b) = EPi (tVr x a) (getType b)
getType (ELetRec _ e) = getType e
getType ECase {eCaseType = ty} = ty
getType (EError _ e) = e
getType (EPrim _ _ t) = t
getType Unknown = Unknown
instance CanType ESort where
type TypeOf ESort = ESort
getType (ESortNamed _) = EHashHash
getType s = case Map.lookup s ptsAxioms of
Just s -> s
Nothing -> error $ "getType: " ++ show s
instance CanType TVr where
type TypeOf TVr = E
getType = tvrType
instance CanType (Lit x t) where
type TypeOf (Lit x t) = t
getType l = litType l
instance CanType e => CanType (Alt e) where
type TypeOf (Alt e) = TypeOf e
getType (Alt _ e) = getType e
sortSortLike (ESort s) = isEHashHash s || isEStarStar s
sortSortLike _ = False
sortKindLike (ESort s) = not (isEHashHash s) && not (isEStarStar s)
sortKindLike e = sortSortLike (getType e)
sortTypeLike ESort {} = False
sortTypeLike e = sortKindLike (getType e)
sortTermLike ESort {} = False
sortTermLike e = sortTypeLike (getType e)
withContextDoc s a = withContext (render s) a
inferType :: (ContextMonad m, ContextOf m ~ String) => DataTable -> [(TVr,E)] -> E -> m E
inferType dataTable ds e = rfc e where
inferType' ds e = inferType dataTable ds e
prettyE = ePretty
rfc e = withContextDoc (text "fullCheck:" </> prettyE e) (fc e >>= strong')
rfc' nds e = withContextDoc (text "fullCheck':" </> prettyE e) (inferType' nds e)
strong' e = withContextDoc (parens $ text "Strong:" </> prettyE e) $ strong ds e
fc s@(ESort _) = return $ getType s
fc (ELit lc@LitCons {}) | let lc' = updateLit dataTable lc, litAliasFor lc /= litAliasFor lc' = fail $ "Alias not correct: " ++ show (lc, litAliasFor lc')
fc (ELit LitCons { litName = n, litArgs = es, litType = t}) | nameType n == TypeConstructor, Just _ <- fromUnboxedNameTuple n = do
withContext ("Checking Unboxed Tuple: " ++ show n) $ do
valid t
es' <- mapM rfc es
strong' t
fc e@(ELit LitCons { litName = n, litArgs = es, litType = t}) = do
withContext ("Checking Constructor: " ++ show e) $ do
valid t
es' <- mapM rfc es
t' <- strong' t
let sts = slotTypes dataTable n t
les = length es
lsts = length sts
withContext ("Checking Args: " ++ show (sts,es')) $ do
unless (les == lsts || (les < lsts && isEPi t')) $ do
fail "constructor with wrong number of arguments"
zipWithM_ eq sts es'
return t'
fc e@(ELit _) = let t = getType e in valid t >> return t
fc (EVar (TVr { tvrIdent = eid })) | eid == emptyId = fail "variable with nothing!"
fc (EVar (TVr { tvrType = t})) = valid t >> strong' t
fc (EPi (TVr { tvrIdent = n, tvrType = at}) b) = do
ESort a <- rfc at
ESort b <- rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
liftM ESort $ monadicLookup (a,b) ptsRulesMap
fc ( ELam tvr@(TVr n at ) b ) = valid at > > rfc ' [ d | d@(v , _ ) < - ds , tvrIdent v /= n ] b > > = \b ' - > ( strong ' $ EPi tvr b ' )
fc (ELam tvr@(TVr { tvrIdent = n, tvrType = at}) b) = do
withContext "Checking Lambda" $ do
valid at
b' <- withContext "Checking Lambda Body" $ rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
withContext "Checking lambda pi" $ strong' $ EPi tvr b'
fc (EAp (EPi tvr e) b) = rfc (subst tvr b e)
fc (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = rfc (EAp (foldl eAp af (litArgs lc)) b)
fc (EAp a b) = do
withContextDoc (text "EAp:" </> parens (prettyE a) </> parens (prettyE b)) $ do
a' <- rfc a
if a' == tBox then return tBox else strong' (eAp a' b)
fc (ELetRec vs e) = do
let ck (TVr { tvrIdent = eid },_) | eid == emptyId = fail "binding of empty var"
ck (tv@(TVr { tvrType = t}),e) = withContextDoc (hsep [text "Checking Let: ", parens (pprint tv),text " = ", parens $ prettyE e ]) $ do
when (getType t == eHash && not (isEPi t)) $ fail $ "Let binding unboxed value: " ++ show (tv,e)
valid' nds t
fceq nds e t
nds = vs ++ ds
mapM_ ck vs
when (hasRepeatUnder (tvrIdent . fst) vs) $ fail "Repeat Variable in ELetRec"
inferType' nds e
strong
fc (EError _ e) = valid e >> (strong' e)
fc (EPrim _ ts t) = mapM_ valid ts >> valid t >> ( strong' t)
TODO - this is a hack to get around case of constants .
withContext "Checking typelike pattern binding case" $ do
et <- rfc e
withContext "Checking typelike default binding" $ eq et (getType b)
verifyPats (casePats ec)
ps <- mapM (strong' . getType) $ casePats ec
withContext "Checking typelike pattern equality" $ eqAll (et:ps)
strong' dt
withContext "Checking typelike binding case" $ do
et <- rfc e
withContext "Checking typelike default binding" $ eq et (getType b)
withContext "Checking typelike alternatives" $ mapM_ (calt e) as
verifyPats (casePats ec)
ps <- withContext "Getting pattern types" $ mapM (strong' . getType) $ casePats ec
withContext "checking typelike pattern equality" $ eqAll (et:ps)
withContext "Evaluating Case Type" $ strong' dt
fc ec@ECase { eCaseScrutinee =e, eCaseBind = b } = do
withContext "Checking plain case" $ do
et <- rfc e
withContext "Checking default binding" $ eq et (getType b)
bs <- withContext "Checking case bodies" $ mapM rfc (caseBodies ec)
ect <- strong' (eCaseType ec)
withContext "Checking case bodies have equal types" $ eqAll (ect:bs)
verifyPats (casePats ec)
ps <- mapM (strong' . getType) $ casePats ec
withContext "checking pattern equality" $ eqAll (et:ps)
return ect
fc Unknown = return Unknown
calt (EVar v) (Alt l e) = do
let nv = followAliases undefined (patToLitEE l)
rfc (subst' v nv e)
calt _ (Alt _ e) = rfc e
verifyPats xs = do
mapM_ verifyPats' xs
when (hasRepeatUnder litHead xs) $ fail "Duplicate case alternatives"
verifyPats' LitCons { litArgs = xs } = when (hasRepeatUnder id (filter (/= emptyId) $ map tvrIdent xs)) $ fail "Case pattern is non-linear"
verifyPats' _ = return ()
eqAll ts = withContextDoc (text "eqAll" </> list (map prettyE ts)) $ foldl1M_ eq ts
valid s = valid' ds s
valid' nds ESort {} = return ()
valid' nds s
| Unknown <- s = return ()
| otherwise = withContextDoc (text "valid:" <+> prettyE s) (do t <- inferType' nds s; valid' nds t)
eq box t2 | boxCompat box t2 = return t2
eq t1 box | boxCompat box t1 = return t1
box = = , canBeBox t2 = return t2
eq t1 box | box = = , canBeBox t1 = return t1
eq Unknown t2 = return t2
eq t1 Unknown = return t1
eq t1 t2 = eq' ds t1 t2
eq' nds t1 t2 = do
e1 <- strong nds (t1)
e2 <- strong nds (t2)
case typesCompatable e1 e2 of
Just () -> return (e1)
Nothing -> failDoc $ text "eq:" <+> align $ vcat [ prettyE (e1), prettyE (e2) ]
fceq nds e1 t2 = do
withContextDoc (hsep [text "fceq:", align $ vcat [parens $ prettyE e1, parens $ prettyE t2]]) $ do
t1 <- inferType' nds e1
eq' nds t1 t2
boxCompat (ELit (LitCons { litName = n })) t | Just e <- fromConjured modBox n = e == getType t
boxCompat _ _ = False
class CanTypeCheck a where
typecheck :: Monad m => DataTable -> a -> m E
infertype :: CanTypeCheck a => DataTable -> a -> E
infertype env a = case typecheck env a of
Left s -> error $ "infertype: " ++ s
Right x -> x
instance CanTypeCheck E where
typecheck dataTable e = case runContextEither $ typeInfer'' dataTable [] e of
Left ss -> fail $ "\n>>> internal error:\n" ++ unlines ss
Right v -> return v
instance CanTypeCheck TVr where
typecheck dt tvr = do
typecheck dt (getType tvr)
return $ getType tvr
instance CanTypeCheck (Lit a E) where
typecheck dt LitCons { litType = t } = typecheck dt t >> return t
typecheck dt LitInt { litType = t } = typecheck dt t >> return t
TODO , types might be bound in scrutinization
instance CanTypeCheck (Alt E) where
typecheck dt (Alt l e) = typecheck dt l >> typecheck dt e
typeInfer :: DataTable -> E -> E
typeInfer dataTable e = case runContextEither $ typeInfer'' dataTable [] e of
Left ss -> error $ "\n>>> internal error:\n" ++ unlines (tail ss)
Right v -> v
typeInfer' :: DataTable -> [(TVr,E)] -> E -> E
typeInfer' dataTable ds e = case runContextEither $ typeInfer'' dataTable ds e of
Left ss -> error $ "\n>>> internal error:\n" ++ unlines (tail ss)
Right v -> v
data TcEnv = TcEnv {
tcContext :: [String]
}
tcContext_u f r@TcEnv{tcContext = x} = r{tcContext = f x}
newtype Tc a = Tc (Reader TcEnv a)
deriving(Monad,Functor,MonadReader TcEnv)
instance ContextMonad Tc where
type ContextOf Tc = String
withContext s = local (tcContext_u (s:))
tcE : : E - > Tc E
tcE e = rfc e where
rfc e = withContextDoc ( text " tcE : " < / > ePretty e ) ( fc e > > = strong ' )
strong ' e = do
ds < - asks tcDefns
withContextDoc ( text " tcE.strong : " < / > ePretty e ) $ strong ds e
fc s@ESort { } = return $ getType s
fc ( ELit LitCons { litType = t } ) = strong ' t
fc e@ELit { } = strong ' ( )
fc ( EVar TVr { tvrIdent = eid } ) | eid = = emptyId = fail " variable with nothing ! "
fc ( EVar TVr { tvrType = t } ) = strong ' t
fc ( EPi TVr { tvrIdent = n , tvrType = at } b ) = do
ESort a < - rfc at
ESort b < - local ( ( \ds - > [ d | d@(v , _ ) < - ds , tvrIdent v /= n ] ) ) $ rfc b
liftM ESort $ monadicLookup ( a , b ) ptsRulesMap
fc ( ELam tvr@TVr { tvrIdent = n , tvrType = at } b ) = do
at ' < - strong ' at
b ' < - local ( ( \ds - > [ d | d@(v , _ ) < - ds , tvrIdent v /= n ] ) ) $ rfc b
return ( EPi ( tVr n at ' ) b ' )
fc ( EAp ( EPi tvr e ) b ) = do
b < - strong ' b
rfc ( subst tvr b e )
fc ( EAp ( ELit lc@LitCons { litAliasFor = Just af } ) b ) = fc ( EAp ( foldl eAp af ( ) ) b )
fc ( EAp a b ) = do
a ' < - rfc a
if a ' = = then return else strong ' ( eAp a ' b )
fc ( ELetRec vs e ) = local ( ( vs + + ) ) $ rfc e
fc ( EError _ e ) = strong ' e
fc ( EPrim _ ts t ) = strong ' t
fc ECase { eCaseType = ty } = do
strong ' ty
fc Unknown = return Unknown
fc e = failDoc $ text " what 's this ? " < / > ( ePretty e )
tcE :: E -> Tc E
tcE e = rfc e where
rfc e = withContextDoc (text "tcE:" </> ePretty e) (fc e >>= strong')
strong' e = do
ds <- asks tcDefns
withContextDoc (text "tcE.strong:" </> ePretty e) $ strong ds e
fc s@ESort {} = return $ getType s
fc (ELit LitCons { litType = t }) = strong' t
fc e@ELit {} = strong' (getType e)
fc (EVar TVr { tvrIdent = eid }) | eid == emptyId = fail "variable with nothing!"
fc (EVar TVr { tvrType = t}) = strong' t
fc (EPi TVr { tvrIdent = n, tvrType = at} b) = do
ESort a <- rfc at
ESort b <- local (tcDefns_u (\ds -> [ d | d@(v,_) <- ds, tvrIdent v /= n ])) $ rfc b
liftM ESort $ monadicLookup (a,b) ptsRulesMap
fc (ELam tvr@TVr { tvrIdent = n, tvrType = at} b) = do
at' <- strong' at
b' <- local (tcDefns_u (\ds -> [ d | d@(v,_) <- ds, tvrIdent v /= n ])) $ rfc b
return (EPi (tVr n at') b')
fc (EAp (EPi tvr e) b) = do
b <- strong' b
rfc (subst tvr b e)
fc (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = fc (EAp (foldl eAp af (litArgs lc)) b)
fc (EAp a b) = do
a' <- rfc a
if a' == tBox then return tBox else strong' (eAp a' b)
fc (ELetRec vs e) = local (tcDefns_u (vs ++)) $ rfc e
fc (EError _ e) = strong' e
fc (EPrim _ ts t) = strong' t
fc ECase { eCaseType = ty } = do
strong' ty
fc Unknown = return Unknown
fc e = failDoc $ text "what's this? " </> (ePretty e)
-}
-- Type inference over E terms (jhc core). 'ds' is the list of
-- (variable, definition) bindings currently in scope; inferred types are
-- normalized with 'strong' before being returned.
typeInfer'' :: (ContextMonad m, ContextOf m ~ String) => DataTable -> [(TVr,E)] -> E -> m E
typeInfer'' dataTable ds e = rfc e where
    -- Recurse with a (possibly extended) binding environment.
    inferType' ds e = typeInfer'' dataTable ds e
    -- Infer the type of a term, then normalize it.
    rfc e = withContextDoc (text "fullCheck':" </> ePretty e) (fc e >>= strong')
    -- Like 'rfc' but inferring under an explicit environment 'nds'.
    rfc' nds e = withContextDoc (text "fullCheck':" </> ePretty e) (inferType' nds e)
    -- Normalize a term under the current bindings.
    strong' e = withContextDoc (text "Strong':" </> ePretty e) $ strong ds e
    fc s@ESort {} = return $ getType s
    fc (ELit LitCons { litType = t }) = strong' t
    fc e@ELit {} = strong' (getType e)
    fc (EVar TVr { tvrIdent = eid }) | eid == emptyId = fail "variable with nothing!"
    fc (EVar TVr { tvrType = t}) = strong' t
    -- Pi type: kinds of domain and codomain are combined via the PTS rules.
    -- The bound variable is shadowed out of the environment for the body.
    fc (EPi TVr { tvrIdent = n, tvrType = at} b) = do
        ESort a <- rfc at
        ESort b <- rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
        liftM ESort $ monadicLookup (a,b) ptsRulesMap
    fc (ELam tvr@TVr { tvrIdent = n, tvrType = at} b) = do
        at' <- strong' at
        b' <- rfc' [ d | d@(v,_) <- ds, tvrIdent v /= n ] b
        return (EPi (tVr n at') b')
    -- Application to a Pi: substitute the (normalized) argument into the body.
    fc (EAp (EPi tvr e) b) = do
        b <- strong' b
        rfc (subst tvr b e)
    -- Applications of aliased constructors are expanded before checking.
    fc (EAp (ELit lc@LitCons { litAliasFor = Just af }) b) = fc (EAp (foldl eAp af (litArgs lc)) b)
    fc (EAp a b) = do
        a' <- rfc a
        if a' == tBox then return tBox else strong' (eAp a' b)
    fc (ELetRec vs e) = do
        let nds = vs ++ ds
        -- NOTE(review): this bare 'strong' looks like extraction damage (it is
        -- an unapplied function used as a statement) — confirm against the
        -- upstream jhc source before relying on this clause.
        strong
        inferType' nds e
    fc (EError _ e) = strong' e
    fc (EPrim _ ts t) = strong' t
    fc ECase { eCaseType = ty } = do
        strong' ty
    fc Unknown = return Unknown
match :: Monad m =>
-> m [(TVr,E)]
match lup vs = \e1 e2 -> liftM Seq.toList $ execWriterT (un e1 e2 etherealIds) where
bvs :: IdSet
bvs = fromList (map tvrIdent vs)
un (EAp a b) (EAp a' b') c = do
un a a' c
un b b' c
un (ELam va ea) (ELam vb eb) c = lam va ea vb eb c
un (EPi va ea) (EPi vb eb) c = lam va ea vb eb c
un (EPi va ea) (ELit LitCons { litName = ar, litArgs = [x,y], litType = lt}) c | ar == tc_Arrow = do
un (tvrType va) x c
un ea y c
un (EPrim s xs t) (EPrim s' ys t') c | length xs == length ys = do
sequence_ [ un x y c | x <- xs | y <- ys]
un t t' c
un (ESort x) (ESort y) c | x == y = return ()
un (ELit (LitInt x t1)) (ELit (LitInt y t2)) c | x == y = un t1 t2 c
un (ELit LitCons { litName = n, litArgs = xs, litType = t }) (ELit LitCons { litName = n', litArgs = ys, litType = t'}) c | n == n' && length xs == length ys = do
sequence_ [ un x y c | x <- xs | y <- ys]
un t t' c
un (EVar TVr { tvrIdent = i, tvrType = t}) (EVar TVr {tvrIdent = j, tvrType = u}) c | i == j = un t u c
un (EVar TVr { tvrIdent = i, tvrType = t}) (EVar TVr {tvrIdent = j, tvrType = u}) c | isEtherealId i || isEtherealId j = fail "Expressions don't match"
un (EAp a b) (ELit lc@LitCons { litArgs = bas@(_:_), litType = t }) c = do
let (al:as) = reverse bas
un a (ELit lc { litArgs = reverse as, litType = ePi tvr { tvrType = getType al } t }) c
un b al c
un (EAp a b) (EPi TVr { tvrType = a1 } a2) c = do
un a (ELit litCons { litArgs = [a1], litName = tc_Arrow, litType = EPi tvr { tvrType = getType a2 } (getType a1) }) c
un b a2 c
un (EVar tvr@TVr { tvrIdent = i, tvrType = t}) b c
| i `member` bvs = tell (Seq.singleton (tvr,b))
| otherwise = fail $ "Expressions do not unify: " ++ show tvr ++ show b
un a (EVar tvr) c | Just b <- lup (tvrIdent tvr), not $ isEVar b = un a b c
un a b c | Just a ' < - followAlias undefined a = un a ' b c
un a b c | Just b' <- followAlias undefined b = un a b' c
un a b _ = fail $ "Expressions do not unify: " ++ show a ++ show b
lam va ea vb eb (c:cs) = do
un (tvrType va) (tvrType vb) (c:cs)
un (subst va (EVar va { tvrIdent = c }) ea) (subst vb (EVar vb { tvrIdent = c }) eb) cs
lam _ _ _ _ _ = error "TypeCheck.match: bad."
|
cfd5070ff15dd69684e388efa7fe1ff6c0159b651bfbb8f08a229f9260432107 | yminer/libml | backpropCommonVisitor.ml | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
[ LibML - Machine Learning Library ]
Copyright ( C ) 2002 - 2003 LAGACHERIE
This program is free software ; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation ; either version 2
of the License , or ( at your option ) any later version . This
program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details . You should have
received a copy of the GNU General Public License
along with this program ; if not , write to the Free Software
Foundation , Inc. , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 ,
USA .
SPECIAL NOTE ( the beerware clause ):
This software is free software . However , it also falls under the beerware
special category . That is , if you find this software useful , or use it
every day , or want to grant us for our modest contribution to the
free software community , feel free to send us a beer from one of
your local brewery . Our preference goes to Belgium abbey beers and
irish stout ( Guiness for strength ! ) , but we like to try new stuffs .
Authors :
E - mail : RICORDEAU
E - mail :
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
[LibML - Machine Learning Library]
Copyright (C) 2002 - 2003 LAGACHERIE Matthieu RICORDEAU Olivier
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version. This
program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. You should have
received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.
SPECIAL NOTE (the beerware clause):
This software is free software. However, it also falls under the beerware
special category. That is, if you find this software useful, or use it
every day, or want to grant us for our modest contribution to the
free software community, feel free to send us a beer from one of
your local brewery. Our preference goes to Belgium abbey beers and
irish stout (Guiness for strength!), but we like to try new stuffs.
Authors:
Matthieu LAGACHERIE
E-mail :
Olivier RICORDEAU
E-mail :
****************************************************************)
*
The backpropCommonVisitor class
@author
@author
@since 10/08/2003
The backpropCommonVisitor class
@author Matthieu Lagacherie
@author Olivier Ricordeau
@since 10/08/2003
*)
open Nn
open BackpropVisitor
open DefaultVisitor
open CommonNN
(* Backpropagation visitor specialised for [commonNN] networks: sets the
   module name and visits a network by printing it. *)
class backpropCommonVisitor =
object
  inherit [commonNN] backpropVisitor
  (* NOTE(review): 'initializer =' followed by '_moduleName = "..."' looks
     garbled — OCaml expects 'initializer expr' (likely a '<-' assignment
     upstream); confirm against the original libml source. *)
  initializer =
    _moduleName = "backpropagation"
  (* Visiting a common network currently just prints it. *)
  method visitCommon (network : ((commonNN) defaultVisitor) nn) =
    network#print
end
| null | https://raw.githubusercontent.com/yminer/libml/1475dd87c2c16983366fab62124e8bbfbbf2161b/src/nn/propagate/backpropCommonVisitor.ml | ocaml | * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
[ LibML - Machine Learning Library ]
Copyright ( C ) 2002 - 2003 LAGACHERIE
This program is free software ; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation ; either version 2
of the License , or ( at your option ) any later version . This
program is distributed in the hope that it will be useful ,
but WITHOUT ANY WARRANTY ; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
GNU General Public License for more details . You should have
received a copy of the GNU General Public License
along with this program ; if not , write to the Free Software
Foundation , Inc. , 59 Temple Place - Suite 330 , Boston , MA 02111 - 1307 ,
USA .
SPECIAL NOTE ( the beerware clause ):
This software is free software . However , it also falls under the beerware
special category . That is , if you find this software useful , or use it
every day , or want to grant us for our modest contribution to the
free software community , feel free to send us a beer from one of
your local brewery . Our preference goes to Belgium abbey beers and
irish stout ( Guiness for strength ! ) , but we like to try new stuffs .
Authors :
E - mail : RICORDEAU
E - mail :
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
[LibML - Machine Learning Library]
Copyright (C) 2002 - 2003 LAGACHERIE Matthieu RICORDEAU Olivier
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version. This
program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. You should have
received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
USA.
SPECIAL NOTE (the beerware clause):
This software is free software. However, it also falls under the beerware
special category. That is, if you find this software useful, or use it
every day, or want to grant us for our modest contribution to the
free software community, feel free to send us a beer from one of
your local brewery. Our preference goes to Belgium abbey beers and
irish stout (Guiness for strength!), but we like to try new stuffs.
Authors:
Matthieu LAGACHERIE
E-mail :
Olivier RICORDEAU
E-mail :
****************************************************************)
*
The backpropCommonVisitor class
@author
@author
@since 10/08/2003
The backpropCommonVisitor class
@author Matthieu Lagacherie
@author Olivier Ricordeau
@since 10/08/2003
*)
open Nn
open BackpropVisitor
open DefaultVisitor
open CommonNN
class backpropCommonVisitor =
object
inherit [commonNN] backpropVisitor
initializer =
_moduleName = "backpropagation"
method visitCommon (network : ((commonNN) defaultVisitor) nn) =
network#print
end
|
|
4105e55fc76f760d84a71e04af61f176fbb850f48fb4a7625e8ff319065db1ec | haskus/haskus-system | Diagrams.hs | # LANGUAGE FlexibleContexts #
module Demo.Diagrams
( topBarDiag
, infoPageDiag
, customPage
)
where
import Haskus.System
import Haskus.System.Graphics.Diagrams
import Haskus.Format.String
import Haskus.System.Linux.Info
-- | Top-bar
topBarDiag :: Float -> Float -> VDiagram
topBarDiag screenWidth _screenHeight = diag
where
diag = mconcat [pbts, bgrect]
lbls = ["Info","Display", "Terminal","Art", "Canvas"]
bts = [ lbl n t | (t,n) <- lbls `zip` [(1::Int)..]]
pbts = position (zip (map mkPoint [0..]) bts)
|> translateY 5.0
|> alignBy (V2 1 0) (-1)
|> translateX 5.0
mkPoint x = p2 (x*75,0)
-- bar background
bgrect = rect screenWidth 20.0
|> lw none
|> fc white
|> alignBy (V2 1 0) (-1)
|> alignBy (V2 0 1) (-1)
-- button + label
lbl n t = (btn n <> lbt t)
|> alignBy (V2 1 0) 1
-- button
btn n = (btt ("F"++show n) <> btnbg)
|> translateX (-12)
|> alignBy (V2 0 1) (-1)
|> translateY (-3)
-- button background
btnbg = square 16
|> fc lightgray
-- button text
btt t = text t
| > fontSize ( local 10.0 )
|> scale 10.0
|> fc black
|> center
-- label text
lbt t = text t
| > fontSize ( normalized 50.0 )
|> scale 10.0
|> fc black
infoPageDiag :: SystemInfo -> VDiagram
infoPageDiag info = d
where
d = position (join tss)
tss = [ [(p2 (0,-2*y), lbl), (p2 (10,-2*y), val)] | ((lbl,val),y) <- ts `zip` [0..]]
ts = [ mt "OS name" systemName
, mt "Release" systemRelease
, mt "Version" systemVersion
, mt "Machine" systemMachine
, mt "Network name" systemNodeName
]
mt lbl f = ( text (lbl ++ ":")
, text (fromCStringBuffer (f info))
)
customPage :: [String] -> VDiagram
customPage strs = d
where
d = position tss
tss = [ (p2 (0,-2*y), lbl) | (lbl,y) <- ts `zip` [0..]]
ts = fmap mk strs
mk x = text x
| null | https://raw.githubusercontent.com/haskus/haskus-system/38b3a363c26bc4d82e3493d8638d46bc35678616/haskus-system-examples/src/demo/Demo/Diagrams.hs | haskell | | Top-bar
bar background
button + label
button
button background
button text
label text | # LANGUAGE FlexibleContexts #
module Demo.Diagrams
( topBarDiag
, infoPageDiag
, customPage
)
where
import Haskus.System
import Haskus.System.Graphics.Diagrams
import Haskus.Format.String
import Haskus.System.Linux.Info
topBarDiag :: Float -> Float -> VDiagram
topBarDiag screenWidth _screenHeight = diag
where
diag = mconcat [pbts, bgrect]
lbls = ["Info","Display", "Terminal","Art", "Canvas"]
bts = [ lbl n t | (t,n) <- lbls `zip` [(1::Int)..]]
pbts = position (zip (map mkPoint [0..]) bts)
|> translateY 5.0
|> alignBy (V2 1 0) (-1)
|> translateX 5.0
mkPoint x = p2 (x*75,0)
bgrect = rect screenWidth 20.0
|> lw none
|> fc white
|> alignBy (V2 1 0) (-1)
|> alignBy (V2 0 1) (-1)
lbl n t = (btn n <> lbt t)
|> alignBy (V2 1 0) 1
btn n = (btt ("F"++show n) <> btnbg)
|> translateX (-12)
|> alignBy (V2 0 1) (-1)
|> translateY (-3)
btnbg = square 16
|> fc lightgray
btt t = text t
| > fontSize ( local 10.0 )
|> scale 10.0
|> fc black
|> center
lbt t = text t
| > fontSize ( normalized 50.0 )
|> scale 10.0
|> fc black
-- | Diagram showing basic system information as "label: value" rows.
infoPageDiag :: SystemInfo -> VDiagram
infoPageDiag info = d
  where
    d = position (join tss)
    -- Two columns: labels at x = 0, values at x = 10; rows 2 units apart.
    tss = [ [(p2 (0,-2*y), lbl), (p2 (10,-2*y), val)] | ((lbl,val),y) <- ts `zip` [0..]]
    ts = [ mt "OS name" systemName
         , mt "Release" systemRelease
         , mt "Version" systemVersion
         , mt "Machine" systemMachine
         , mt "Network name" systemNodeName
         ]
    -- Pair a label with the matching SystemInfo field, decoded from its
    -- C-string buffer.
    mt lbl f = ( text (lbl ++ ":")
               , text (fromCStringBuffer (f info))
               )
-- | Render a list of text lines as a diagram, stacking each line two
-- units below the previous one.
customPage :: [String] -> VDiagram
customPage strs = d
  where
    d = position tss
    -- Place each label at x = 0, descending by 2 units per line.
    tss = [ (p2 (0,-2*y), lbl) | (lbl,y) <- ts `zip` [0..]]
    ts = fmap mk strs
    mk x = text x
|
1ce5d5af1e9738b48c781a7b0462e58e942ecde206ef35a4201dd4012f70425f | tek/ribosome | TestConfig.hs | module Ribosome.Host.Test.Data.TestConfig where
import Ribosome.Host.Data.HostConfig (HostConfig (HostConfig), dataLogConc)
-- | Configuration for Ribosome host tests: a frozen-time flag plus the
-- host configuration under test.
data TestConfig =
  TestConfig {
    -- Freeze the test clock when True.
    freezeTime :: Bool,
    -- Host (plugin) configuration; see 'HostConfig'.
    host :: HostConfig
  }
  deriving stock (Eq, Show, Generic)
-- | Defaults: time not frozen; 'dataLogConc' disabled — presumably to keep
-- test log output deterministic (TODO confirm).
instance Default TestConfig where
  def =
    TestConfig False (HostConfig def { dataLogConc = False })
| null | https://raw.githubusercontent.com/tek/ribosome/a676b4f0085916777bfdacdcc761f82d933edb80/packages/host-test/lib/Ribosome/Host/Test/Data/TestConfig.hs | haskell | module Ribosome.Host.Test.Data.TestConfig where
import Ribosome.Host.Data.HostConfig (HostConfig (HostConfig), dataLogConc)
data TestConfig =
TestConfig {
freezeTime :: Bool,
host :: HostConfig
}
deriving stock (Eq, Show, Generic)
instance Default TestConfig where
def =
TestConfig False (HostConfig def { dataLogConc = False })
|
|
908e394089d6cc06ae0b814b22ea8e233cc6f06f93036f6d11d0858afb515e9f | HumbleUI/HumbleUI | settings.clj | (ns examples.settings
(:require
[examples.state :as state]
[io.github.humbleui.debug :as debug]
[io.github.humbleui.paint :as paint]
[io.github.humbleui.ui :as ui]))
(def ui
  ;; Settings panel: a rounded card containing two toggle rows ("On top",
  ;; "Debug") separated by a thin divider. Paints are computed once per
  ;; display scale.
  (ui/with-scale scale
    (let [padding-inner 12
          fill-bg (paint/fill 0xFFF2F2F2)
          stroke-bg (paint/stroke 0xFFE0E0E0 (* 0.5 scale))
          fill-delimiter (paint/fill 0xFFE7E7E7)]
      (ui/padding 20 20
        (ui/valign 0
          (ui/rounded-rect {:radius 6} fill-bg
            (ui/rounded-rect {:radius 6} stroke-bg
              (ui/padding padding-inner padding-inner
                (ui/column
                  ;; "On top" row: label left, floating-window toggle right.
                  (ui/row
                    (ui/valign 0.5
                      (ui/label "On top"))
                    [:stretch 1 nil]
                    (ui/toggle state/*floating))
                  (ui/gap 0 padding-inner)
                  ;; 1px horizontal divider between the rows.
                  (ui/rect fill-delimiter
                    (ui/gap 0 1))
                  (ui/gap 0 padding-inner)
                  ;; "Debug" row: toggles the debug overlay flag.
                  (ui/row
                    (ui/valign 0.5
                      (ui/label "Debug"))
                    [:stretch 1 nil]
                    (ui/toggle debug/*enabled?)))))))))))
| null | https://raw.githubusercontent.com/HumbleUI/HumbleUI/5bf5e2a47bf99818c6e7f712c43462d4ecb6254c/dev/examples/settings.clj | clojure | (ns examples.settings
(:require
[examples.state :as state]
[io.github.humbleui.debug :as debug]
[io.github.humbleui.paint :as paint]
[io.github.humbleui.ui :as ui]))
(def ui
(ui/with-scale scale
(let [padding-inner 12
fill-bg (paint/fill 0xFFF2F2F2)
stroke-bg (paint/stroke 0xFFE0E0E0 (* 0.5 scale))
fill-delimiter (paint/fill 0xFFE7E7E7)]
(ui/padding 20 20
(ui/valign 0
(ui/rounded-rect {:radius 6} fill-bg
(ui/rounded-rect {:radius 6} stroke-bg
(ui/padding padding-inner padding-inner
(ui/column
(ui/row
(ui/valign 0.5
(ui/label "On top"))
[:stretch 1 nil]
(ui/toggle state/*floating))
(ui/gap 0 padding-inner)
(ui/rect fill-delimiter
(ui/gap 0 1))
(ui/gap 0 padding-inner)
(ui/row
(ui/valign 0.5
(ui/label "Debug"))
[:stretch 1 nil]
(ui/toggle debug/*enabled?)))))))))))
|
|
8a4b3a3b2bb130c83c01f0244b902cbb63e4dfa490e3cbec760fd95e7f238a42 | clojerl/clojerl | clojerl.Future.erl | -module('clojerl.Future').
-include("clojerl.hrl").
-include("clojerl_int.hrl").
-behaviour(gen_server).
-behavior('erlang.io.ICloseable').
-behavior('clojerl.IBlockingDeref').
-behavior('clojerl.IDeref').
-behavior('clojerl.IEquiv').
-behavior('clojerl.IHash').
-behavior('clojerl.IPending').
-behavior('clojerl.IStringable').
-export([ ?CONSTRUCTOR/1
, cancel/1
, 'cancelled?'/1
, 'done?'/1
]).
-export([close/1]).
-export([deref/3]).
-export([deref/1]).
-export(['realized?'/1]).
-export([equiv/2]).
-export([hash/1]).
-export([str/1]).
%% eval
-export([ eval/2
]).
%% gen_server callbacks
-export([ start_link/2
, init/1
, handle_call/3
, handle_cast/2
, handle_info/2
, terminate/2
, code_change/3
]).
-export_type([type/0]).
-type type() :: #{ ?TYPE => ?M
, id => binary()
, pid => pid()
}.
%% @doc Creates a future that immediately starts evaluating `Fn' in a
%% dedicated linked gen_server process. The returned map carries a random
%% UUID (the future's identity; see equiv/2 and hash/1) and the worker pid.
-spec ?CONSTRUCTOR(any()) -> type().
?CONSTRUCTOR(Fn) ->
  UUID = 'erlang.util.UUID':random(),
  Id = 'erlang.util.UUID':str(UUID),
  {ok, Pid} = start_link(Id, Fn),
  #{ ?TYPE => ?M
   , id => Id
   , pid => Pid
   }.
%% @doc Cancels the future by stopping its worker process.
-spec cancel(type()) -> ok.
cancel(#{?TYPE := ?M, pid := Pid}) ->
  ok = gen_server:stop(Pid).
%% @doc A future counts as cancelled once its worker process is gone.
-spec 'cancelled?'(type()) -> boolean().
'cancelled?'(#{?TYPE := ?M, pid := Pid}) ->
  not erlang:is_process_alive(Pid).
%% @doc True when the worker is alive and has produced a result.
%% Note: a cancelled (dead) future reports false here rather than erroring.
-spec 'done?'(type()) -> boolean().
'done?'(#{?TYPE := ?M, pid := Pid}) ->
  erlang:is_process_alive(Pid) andalso gen_server:call(Pid, 'done?').
%%------------------------------------------------------------------------------
%% Protocols
%%------------------------------------------------------------------------------
%% clojerl.ICloseable
%% @doc Closing a future stops its worker process (same effect as cancel/1).
close(#{?TYPE := ?M, pid := Pid}) ->
  ok = gen_server:stop(Pid).
%% clojerl.IBlockingDeref
%% @doc Blocking deref with a timeout: returns the computed value, or
%% `TimeoutVal' when the gen_server call times out after `TimeoutMs' ms
%% (the timeout exit is caught and converted).
deref(#{?TYPE := ?M, pid := Pid}, TimeoutMs, TimeoutVal) ->
  try do_deref(Pid, TimeoutMs)
  catch exit:{timeout, _} -> TimeoutVal
  end.
%% clojerl.IDeref
%% @doc Blocks indefinitely until the future's value is available.
deref(#{?TYPE := ?M, pid := Pid}) ->
  do_deref(Pid, infinity).
%% clojerl.IEquiv
%% @doc Two futures are equivalent iff they carry the same UUID.
equiv( #{?TYPE := ?M, id := Id}
     , #{?TYPE := ?M, id := Id}
     ) ->
  true;
equiv(_, _) ->
  false.
%% clojerl.IHash
%% @doc Hash derived from the UUID, consistent with equiv/2.
hash(#{?TYPE := ?M, id := Id}) ->
  erlang:phash2(Id).
%% clojerl.IPending
%% @doc A future is realized exactly when it is done.
'realized?'(#{?TYPE := ?M} = Future) ->
  'done?'(Future).
%% clojerl.IStringable
%% @doc Printable representation embedding the future's UUID.
str(#{?TYPE := ?M, id := Id}) ->
  <<"#<clojerl.Future ", Id/binary, ">">>.
%%------------------------------------------------------------------------------
%% gen_server callbacks
%%------------------------------------------------------------------------------
-type state() :: #{ id => binary()
, fn => function()
, result => ?NIL | {ok, any()}
, pending => queue:queue()
}.
%% @doc Starts the future's gen_server worker, linked to the caller.
start_link(Id, Fn) ->
  gen_server:start_link(?MODULE, {Id, Fn}, []).
%% @doc Initializes the server state (no result yet, empty queue of
%% pending derefs) and spawns a linked evaluator process that reports the
%% result back via a cast (see eval/2).
-spec init({binary(), function()}) -> {ok, state()}.
init({Id, Fn}) ->
  State = #{ id => Id
           , fn => Fn
           , result => ?NIL
           , pending => queue:new()
           },
  proc_lib:spawn_link(?MODULE, eval, [self(), Fn]),
  {ok, State}.
%% Derefs arriving before a result exists are queued; they are replied to
%% once the evaluator reports back (see handle_cast below).
handle_call(deref, From, #{result := ?NIL, pending := Pending} = State0) ->
  State1 = State0#{pending := queue:cons(From, Pending)},
  {noreply, State1};
handle_call(deref, _From, #{result := Result} = State) ->
  {reply, Result, State};
handle_call('done?', _From, #{result := Result} = State) ->
  {reply, Result =/= ?NIL, State}.
%% The evaluator casts {result, R}: store it and flush every queued deref.
handle_cast({result, Result}, #{pending := Pending} = State) ->
  [gen_server:reply(From, Result) || From <- queue:to_list(Pending)],
  {noreply, State#{result := Result, pending := queue:new()}}.
%% Unexpected messages are ignored.
handle_info(_Msg, State) ->
  {noreply, State}.
terminate(_Msg, State) ->
  {ok, State}.
code_change(_Msg, _From, State) ->
  {ok, State}.
%%------------------------------------------------------------------------------
%% Helper functions
%%------------------------------------------------------------------------------
%% @doc Runs the future's function and casts {result, {ok, Value}} or
%% {result, {error, Reason}} back to the owning gen_server. Every exception
%% class is caught so the worker always delivers a result.
-spec eval(pid(), function()) -> ok.
eval(Pid, Fn) ->
  Result = try {ok, clj_rt:apply(Fn, [])}
           catch _:Error -> {error, Error}
           end,
  ok = gen_server:cast(Pid, {result, Result}).
%% @doc Synchronous deref against the worker: unwraps a successful value;
%% a stored error is re-signalled via the ?ERROR macro. A gen_server call
%% timeout propagates as an exit to the caller.
-spec do_deref(pid(), timeout()) -> any().
do_deref(Pid, Timeout) ->
  case gen_server:call(Pid, deref, Timeout) of
    {ok, Value} -> Value;
    {error, Error} -> ?ERROR(Error)
  end.
| null | https://raw.githubusercontent.com/clojerl/clojerl/506000465581d6349659898dd5025fa259d5cf28/src/erl/lang/clojerl.Future.erl | erlang | eval
gen_server callbacks
------------------------------------------------------------------------------
------------------------------------------------------------------------------
clojerl.ICloseable
clojerl.IDeref
clojerl.IEquiv
clojerl.IPending
clojerl.IStringable
------------------------------------------------------------------------------
gen_server callbacks
------------------------------------------------------------------------------
------------------------------------------------------------------------------
Helper functions
------------------------------------------------------------------------------ | -module('clojerl.Future').
-include("clojerl.hrl").
-include("clojerl_int.hrl").
-behaviour(gen_server).
-behavior('erlang.io.ICloseable').
-behavior('clojerl.IBlockingDeref').
-behavior('clojerl.IDeref').
-behavior('clojerl.IEquiv').
-behavior('clojerl.IHash').
-behavior('clojerl.IPending').
-behavior('clojerl.IStringable').
-export([ ?CONSTRUCTOR/1
, cancel/1
, 'cancelled?'/1
, 'done?'/1
]).
-export([close/1]).
-export([deref/3]).
-export([deref/1]).
-export(['realized?'/1]).
-export([equiv/2]).
-export([hash/1]).
-export([str/1]).
-export([ eval/2
]).
-export([ start_link/2
, init/1
, handle_call/3
, handle_cast/2
, handle_info/2
, terminate/2
, code_change/3
]).
-export_type([type/0]).
-type type() :: #{ ?TYPE => ?M
, id => binary()
, pid => pid()
}.
-spec ?CONSTRUCTOR(any()) -> type().
?CONSTRUCTOR(Fn) ->
UUID = 'erlang.util.UUID':random(),
Id = 'erlang.util.UUID':str(UUID),
{ok, Pid} = start_link(Id, Fn),
#{ ?TYPE => ?M
, id => Id
, pid => Pid
}.
-spec cancel(type()) -> ok.
cancel(#{?TYPE := ?M, pid := Pid}) ->
ok = gen_server:stop(Pid).
-spec 'cancelled?'(type()) -> boolean().
'cancelled?'(#{?TYPE := ?M, pid := Pid}) ->
not erlang:is_process_alive(Pid).
-spec 'done?'(type()) -> boolean().
'done?'(#{?TYPE := ?M, pid := Pid}) ->
erlang:is_process_alive(Pid) andalso gen_server:call(Pid, 'done?').
Protocols
close(#{?TYPE := ?M, pid := Pid}) ->
ok = gen_server:stop(Pid).
clojerl . IBlockingDeref
deref(#{?TYPE := ?M, pid := Pid}, TimeoutMs, TimeoutVal) ->
try do_deref(Pid, TimeoutMs)
catch exit:{timeout, _} -> TimeoutVal
end.
deref(#{?TYPE := ?M, pid := Pid}) ->
do_deref(Pid, infinity).
equiv( #{?TYPE := ?M, id := Id}
, #{?TYPE := ?M, id := Id}
) ->
true;
equiv(_, _) ->
false.
clojerl . IHash
hash(#{?TYPE := ?M, id := Id}) ->
erlang:phash2(Id).
'realized?'(#{?TYPE := ?M} = Future) ->
'done?'(Future).
str(#{?TYPE := ?M, id := Id}) ->
<<"#<clojerl.Future ", Id/binary, ">">>.
-type state() :: #{ id => binary()
, fn => function()
, result => ?NIL | {ok, any()}
, pending => queue:queue()
}.
start_link(Id, Fn) ->
gen_server:start_link(?MODULE, {Id, Fn}, []).
-spec init({binary(), function()}) -> {ok, state()}.
init({Id, Fn}) ->
State = #{ id => Id
, fn => Fn
, result => ?NIL
, pending => queue:new()
},
proc_lib:spawn_link(?MODULE, eval, [self(), Fn]),
{ok, State}.
handle_call(deref, From, #{result := ?NIL, pending := Pending} = State0) ->
State1 = State0#{pending := queue:cons(From, Pending)},
{noreply, State1};
handle_call(deref, _From, #{result := Result} = State) ->
{reply, Result, State};
handle_call('done?', _From, #{result := Result} = State) ->
{reply, Result =/= ?NIL, State}.
handle_cast({result, Result}, #{pending := Pending} = State) ->
[gen_server:reply(From, Result) || From <- queue:to_list(Pending)],
{noreply, State#{result := Result, pending := queue:new()}}.
handle_info(_Msg, State) ->
{noreply, State}.
terminate(_Msg, State) ->
{ok, State}.
code_change(_Msg, _From, State) ->
{ok, State}.
-spec eval(pid(), function()) -> ok.
eval(Pid, Fn) ->
Result = try {ok, clj_rt:apply(Fn, [])}
catch _:Error -> {error, Error}
end,
ok = gen_server:cast(Pid, {result, Result}).
-spec do_deref(pid(), timeout()) -> any().
do_deref(Pid, Timeout) ->
case gen_server:call(Pid, deref, Timeout) of
{ok, Value} -> Value;
{error, Error} -> ?ERROR(Error)
end.
|
4a2093ea6cb260c3b1b1519c19ec8f6bf61386e382ec7f9d30d9f20807386746 | input-output-hk/cardano-wallet | Launcher.hs | # LANGUAGE CPP #
{-# LANGUAGE DeriveAnyClass #-}
# LANGUAGE DeriveGeneric #
# LANGUAGE LambdaCase #
# LANGUAGE NamedFieldPuns #
# LANGUAGE ScopedTypeVariables #
HLINT ignore " Unused LANGUAGE pragma "
-- |
Copyright : © 2018 - 2020 IOHK
-- License: Apache-2.0
--
-- This module contains a mechanism for launching external processes, ensuring
-- that they are terminated on exceptions.
module Cardano.Launcher
( Command (..)
, StdStream(..)
, ProcessHasExited(..)
, withBackendProcess
, withBackendCreateProcess
-- * Logging
, LauncherLog(..)
) where
import Prelude
import Cardano.BM.Data.Severity
( Severity (..) )
import Cardano.BM.Data.Tracer
( HasPrivacyAnnotation (..), HasSeverityAnnotation (..) )
import Cardano.Startup
( killProcess )
import Control.Monad
( join, void )
import Control.Monad.IO.Class
( liftIO )
import Control.Monad.IO.Unlift
( MonadUnliftIO (..) )
import Control.Tracer
( Tracer, contramap, traceWith )
import Data.Either.Combinators
( leftToMaybe )
import Data.List
( isPrefixOf )
import Data.Text
( Text )
import Data.Text.Class
( ToText (..) )
import Fmt
( Buildable (..)
, Builder
, blockListF'
, fmt
, indentF
, (+|)
, (+||)
, (|+)
, (||+)
)
import GHC.Generics
( Generic )
import System.Exit
( ExitCode (..) )
import System.IO
( Handle )
import System.Process
( cleanupProcess, getPid )
import UnliftIO.Async
( race )
import UnliftIO.Concurrent
( forkIO, forkIOWithUnmask, killThread, threadDelay )
import UnliftIO.Exception
( Exception
, IOException
, bracket
, bracket_
, finally
, onException
, tryJust
)
import UnliftIO.MVar
( newEmptyMVar, putMVar, readMVar )
import UnliftIO.Process
( CmdSpec (..)
, CreateProcess (..)
, ProcessHandle
, StdStream (..)
, createProcess
, proc
, waitForProcess
)
import qualified Data.Text as T
| Represent a command to execute . are provided as a list where options
-- are expected to be prefixed with `--` or `-`. For example:
--
-- @
-- Command "cardano-wallet"
-- [ "server"
, " --port " , " 8080 "
-- , "--network", "mainnet"
-- ] (return ())
-- Inherit
-- Inherit
-- @
data Command = Command
{ cmdName :: String
, cmdArgs :: [String]
, cmdSetup :: IO ()
-- ^ An extra action to run _before_ the command
, cmdInput :: StdStream
-- ^ Input to supply to command
, cmdOutput :: StdStream
^ What to do with stdout & stderr
} deriving (Generic)
instance Show Command where
show = show . build
instance Eq Command where
a == b = build a == build b
| Format a command nicely with one argument / option per line .
--
-- e.g.
--
> > > fmt $ buildCommand " cardano - wallet - server " [ " --port " , " 8080 " , " --network " , " mainnet " ] ( return ( ) )
-- cardano-wallet-server
-- --port 8080
-- --network mainnet
buildCommand :: String -> [String] -> Builder
buildCommand name args = mconcat [build name, "\n", indentF 4 argsBuilder]
where
argsBuilder = blockListF' "" build $ snd $ foldl buildOptions ("", []) args
buildOptions :: (String, [String]) -> String -> (String, [String])
buildOptions ("", grp) arg =
(arg, grp)
buildOptions (partial, grp) arg =
if ("--" `isPrefixOf` partial) && not ("--" `isPrefixOf` arg) then
("", grp ++ [partial <> " " <> arg])
else
(arg, grp ++ [partial])
instance Buildable Command where
build (Command name args _ _ _) = buildCommand name args
-- | ProcessHasExited is used by a monitoring thread to signal that the process
-- has exited.
data ProcessHasExited
= ProcessDidNotStart String IOException
| ProcessHasExited String ExitCode
deriving (Show, Eq)
instance Exception ProcessHasExited
-- | Starts a command in the background and then runs an action. If the action
-- finishes (through an exception or otherwise) then the process is terminated
-- (see 'withCreateProcess') for details. If the process exits, the action is
cancelled . The return type reflects those two cases .
--
The action receives the ' ProcessHandle ' and stdin ' Handle ' as arguments .
withBackendProcess
:: MonadUnliftIO m
=> Tracer m LauncherLog
-- ^ Logging
-> Command
-- ^ 'Command' description
-> (Maybe Handle -> ProcessHandle -> m a)
-- ^ Action to execute while process is running.
-> m (Either ProcessHasExited a)
withBackendProcess tr (Command name args before std_in std_out) action =
liftIO before >> withBackendCreateProcess tr process action
where
process = (proc name args) { std_in, std_out, std_err = std_out }
| A variant of ' withBackendProcess ' which accepts a general ' CreateProcess '
-- object. This version also has nicer async properties than
' System . Process.withCreateProcess ' .
--
-- This function should ensure:
--
1 . If the action finishes or throws an exception , then the process is also
-- terminated.
--
2 . After the process is sent the signal to terminate , this function will
-- block until the process has actually exited - unless that takes longer
than the 5 second timeout . After the timeout has lapsed , the process will
-- be sent a kill signal.
--
3 . If the process exits , then the action is cancelled .
--
-- fixme: This is more or less a reimplementation of
' System . Process . Typed.withProcessWait ' ( except for wait timeout ) . The
-- launcher code should be converted to use @typed-process@.
withBackendCreateProcess
:: forall m a. (MonadUnliftIO m)
=> Tracer m LauncherLog
-- ^ Logging
-> CreateProcess
-- ^ 'Command' description
-> (Maybe Handle -> ProcessHandle -> m a)
-- ^ Action to execute while process is running.
-> m (Either ProcessHasExited a)
withBackendCreateProcess tr process action = do
traceWith tr $ MsgLauncherStart name args
exitVar <- newEmptyMVar
res <- fmap join $ tryJust spawnPredicate $ bracket
(createProcess process)
(cleanupProcessAndWait (readMVar exitVar)) $
\(mstdin, _, _, ph) -> do
pid <- maybe "-" (T.pack . show) <$> liftIO (getPid ph)
let tr' = contramap (WithProcessInfo name pid) tr
let tr'' = contramap MsgLauncherWait tr'
traceWith tr' MsgLauncherStarted
interruptibleWaitForProcess tr'' ph (putMVar exitVar)
race (ProcessHasExited name <$> readMVar exitVar) $ bracket_
(traceWith tr' MsgLauncherAction)
(traceWith tr' MsgLauncherActionDone)
(action mstdin ph)
traceWith tr $ MsgLauncherFinish (leftToMaybe res)
pure res
where
-- Exceptions resulting from the @exec@ call for this command. The most
-- likely exception is that the command does not exist. We don't want to
-- catch exceptions thrown by the action. I couldn't find a better way of
-- doing this.
spawnPredicate :: IOException -> Maybe ProcessHasExited
spawnPredicate e
| name `isPrefixOf` show e = Just (ProcessDidNotStart name e)
| otherwise = Nothing
-- Run the 'cleanupProcess' function from the process library, but wait for
-- the process to exit, rather than immediately returning. If the process
-- doesn't exit after timeout, kill it, to avoid blocking indefinitely.
cleanupProcessAndWait getExitStatus ps@(_, _, _, ph) = do
traceWith tr MsgLauncherCleanup
liftIO $ cleanupProcess ps
let timeoutSecs = 5
-- Async exceptions are currently masked because this is running in a
-- bracket cleanup handler. We fork a thread and unmask so that the
-- timeout can be cancelled.
tid <- forkIOWithUnmask $ \unmask -> unmask $ do
threadDelay (timeoutSecs * 1000 * 1000)
traceWith tr (MsgLauncherCleanupTimedOut timeoutSecs)
liftIO (getPid ph >>= mapM_ killProcess)
void getExitStatus `finally` killThread tid
traceWith tr MsgLauncherCleanupFinished
-- Wraps 'waitForProcess' in another thread. This works around the unwanted
behaviour of the process library on Windows where ' waitForProcess ' seems
-- to block all concurrent async actions in the thread.
interruptibleWaitForProcess
:: Tracer m WaitForProcessLog
-> ProcessHandle
-> (ExitCode -> m ())
-> m ()
interruptibleWaitForProcess tr' ph onExit =
void $ forkIO (waitThread `onException` continue)
where
waitThread = do
traceWith tr' MsgWaitBefore
status <- waitForProcess ph
traceWith tr' (MsgWaitAfter status)
onExit status
continue = do
traceWith tr' MsgWaitCancelled
onExit (ExitFailure 256)
(name, args) = getCreateProcessNameArgs process
-- | Recover the command name and arguments from a 'proc', just for logging.
getCreateProcessNameArgs :: CreateProcess -> (FilePath, [String])
getCreateProcessNameArgs process = case cmdspec process of
ShellCommand cmd -> (cmd, [])
RawCommand cmd args -> (cmd, args)
{-------------------------------------------------------------------------------
Logging
-------------------------------------------------------------------------------}
data LauncherLog
= MsgLauncherStart String [String]
| WithProcessInfo String Text LaunchedProcessLog
| MsgLauncherCleanup
| MsgLauncherCleanupTimedOut Int
| MsgLauncherCleanupFinished
| MsgLauncherFinish (Maybe ProcessHasExited)
deriving (Show, Eq, Generic)
data LaunchedProcessLog
= MsgLauncherStarted
| MsgLauncherAction
| MsgLauncherActionDone
| MsgLauncherWait WaitForProcessLog
deriving (Show, Eq, Generic)
data WaitForProcessLog
= MsgWaitBefore
| MsgWaitAfter ExitCode
| MsgWaitCancelled
deriving (Show, Eq, Generic)
instance HasPrivacyAnnotation LauncherLog
instance HasSeverityAnnotation LauncherLog where
getSeverityAnnotation = \case
MsgLauncherStart _ _ -> Notice
WithProcessInfo _ _ msg -> getSeverityAnnotation msg
MsgLauncherFinish Nothing -> Debug
MsgLauncherFinish (Just (ProcessDidNotStart _ _)) -> Error
MsgLauncherFinish (Just (ProcessHasExited _ st)) -> case st of
ExitSuccess -> Notice
ExitFailure _ -> Error
MsgLauncherCleanup -> Debug
MsgLauncherCleanupTimedOut _ -> Notice
MsgLauncherCleanupFinished -> Debug
instance HasPrivacyAnnotation LaunchedProcessLog
instance HasSeverityAnnotation LaunchedProcessLog where
getSeverityAnnotation = \case
MsgLauncherStarted -> Info
MsgLauncherWait msg -> getSeverityAnnotation msg
MsgLauncherAction -> Debug
MsgLauncherActionDone -> Notice
instance HasPrivacyAnnotation WaitForProcessLog
instance HasSeverityAnnotation WaitForProcessLog where
getSeverityAnnotation = \case
MsgWaitBefore -> Debug
MsgWaitAfter _ -> Debug
MsgWaitCancelled -> Debug
instance ToText ProcessHasExited where
toText (ProcessHasExited name code) =
"Child process "+|name|+" exited with "+|statusText code|+""
toText (ProcessDidNotStart name _e) =
"Could not start "+|name|+""
instance ToText LauncherLog where
toText ll = fmt $ case ll of
MsgLauncherStart cmd args ->
"Starting process "+|buildCommand cmd args|+""
WithProcessInfo name pid msg ->
"["+|name|+"."+|pid|+"] "+|toText msg|+""
MsgLauncherFinish Nothing ->
"Action finished"
MsgLauncherFinish (Just exited) -> build $ toText exited
MsgLauncherCleanup ->
"Begin process cleanup"
MsgLauncherCleanupTimedOut t ->
"Timed out waiting for process to exit after "+|t|+" seconds"
MsgLauncherCleanupFinished ->
"Process cleanup finished"
instance ToText LaunchedProcessLog where
toText = \case
MsgLauncherStarted -> "Process started"
MsgLauncherAction -> "Running withBackend action"
MsgLauncherWait msg -> toText msg
MsgLauncherActionDone -> "withBackend action done. Terminating child process"
instance ToText WaitForProcessLog where
toText = \case
MsgWaitBefore ->
"Waiting for process to exit"
MsgWaitAfter status -> fmt $
"Process exited with "+|statusText status|+""
MsgWaitCancelled ->
"There was an exception waiting for the process"
statusText :: ExitCode -> Text
statusText ExitSuccess = "success"
statusText (ExitFailure n)
| n >= 0 = fmt $ "code "+||n||+" (failure)"
| otherwise = fmt $ "signal "+||(-n)||+""
| null | https://raw.githubusercontent.com/input-output-hk/cardano-wallet/f8455ff02816acfef11131f6fbd6b11334309c2a/lib/launcher/src/Cardano/Launcher.hs | haskell | # LANGUAGE DeriveAnyClass #
|
License: Apache-2.0
This module contains a mechanism for launching external processes, ensuring
that they are terminated on exceptions.
* Logging
are expected to be prefixed with `--` or `-`. For example:
@
Command "cardano-wallet"
[ "server"
, "--network", "mainnet"
] (return ())
Inherit
Inherit
@
^ An extra action to run _before_ the command
^ Input to supply to command
e.g.
cardano-wallet-server
--port 8080
--network mainnet
| ProcessHasExited is used by a monitoring thread to signal that the process
has exited.
| Starts a command in the background and then runs an action. If the action
finishes (through an exception or otherwise) then the process is terminated
(see 'withCreateProcess') for details. If the process exits, the action is
^ Logging
^ 'Command' description
^ Action to execute while process is running.
object. This version also has nicer async properties than
This function should ensure:
terminated.
block until the process has actually exited - unless that takes longer
be sent a kill signal.
fixme: This is more or less a reimplementation of
launcher code should be converted to use @typed-process@.
^ Logging
^ 'Command' description
^ Action to execute while process is running.
Exceptions resulting from the @exec@ call for this command. The most
likely exception is that the command does not exist. We don't want to
catch exceptions thrown by the action. I couldn't find a better way of
doing this.
Run the 'cleanupProcess' function from the process library, but wait for
the process to exit, rather than immediately returning. If the process
doesn't exit after timeout, kill it, to avoid blocking indefinitely.
Async exceptions are currently masked because this is running in a
bracket cleanup handler. We fork a thread and unmask so that the
timeout can be cancelled.
Wraps 'waitForProcess' in another thread. This works around the unwanted
to block all concurrent async actions in the thread.
| Recover the command name and arguments from a 'proc', just for logging.
------------------------------------------------------------------------------
Logging
------------------------------------------------------------------------------ | # LANGUAGE CPP #
# LANGUAGE DeriveGeneric #
# LANGUAGE LambdaCase #
# LANGUAGE NamedFieldPuns #
# LANGUAGE ScopedTypeVariables #
HLINT ignore " Unused LANGUAGE pragma "
Copyright : © 2018 - 2020 IOHK
module Cardano.Launcher
( Command (..)
, StdStream(..)
, ProcessHasExited(..)
, withBackendProcess
, withBackendCreateProcess
, LauncherLog(..)
) where
import Prelude
import Cardano.BM.Data.Severity
( Severity (..) )
import Cardano.BM.Data.Tracer
( HasPrivacyAnnotation (..), HasSeverityAnnotation (..) )
import Cardano.Startup
( killProcess )
import Control.Monad
( join, void )
import Control.Monad.IO.Class
( liftIO )
import Control.Monad.IO.Unlift
( MonadUnliftIO (..) )
import Control.Tracer
( Tracer, contramap, traceWith )
import Data.Either.Combinators
( leftToMaybe )
import Data.List
( isPrefixOf )
import Data.Text
( Text )
import Data.Text.Class
( ToText (..) )
import Fmt
( Buildable (..)
, Builder
, blockListF'
, fmt
, indentF
, (+|)
, (+||)
, (|+)
, (||+)
)
import GHC.Generics
( Generic )
import System.Exit
( ExitCode (..) )
import System.IO
( Handle )
import System.Process
( cleanupProcess, getPid )
import UnliftIO.Async
( race )
import UnliftIO.Concurrent
( forkIO, forkIOWithUnmask, killThread, threadDelay )
import UnliftIO.Exception
( Exception
, IOException
, bracket
, bracket_
, finally
, onException
, tryJust
)
import UnliftIO.MVar
( newEmptyMVar, putMVar, readMVar )
import UnliftIO.Process
( CmdSpec (..)
, CreateProcess (..)
, ProcessHandle
, StdStream (..)
, createProcess
, proc
, waitForProcess
)
import qualified Data.Text as T
| Represent a command to execute . are provided as a list where options
, " --port " , " 8080 "
data Command = Command
{ cmdName :: String
, cmdArgs :: [String]
, cmdSetup :: IO ()
, cmdInput :: StdStream
, cmdOutput :: StdStream
^ What to do with stdout & stderr
} deriving (Generic)
instance Show Command where
show = show . build
instance Eq Command where
a == b = build a == build b
| Format a command nicely with one argument / option per line .
> > > fmt $ buildCommand " cardano - wallet - server " [ " --port " , " 8080 " , " --network " , " mainnet " ] ( return ( ) )
buildCommand :: String -> [String] -> Builder
buildCommand name args = mconcat [build name, "\n", indentF 4 argsBuilder]
where
argsBuilder = blockListF' "" build $ snd $ foldl buildOptions ("", []) args
buildOptions :: (String, [String]) -> String -> (String, [String])
buildOptions ("", grp) arg =
(arg, grp)
buildOptions (partial, grp) arg =
if ("--" `isPrefixOf` partial) && not ("--" `isPrefixOf` arg) then
("", grp ++ [partial <> " " <> arg])
else
(arg, grp ++ [partial])
instance Buildable Command where
build (Command name args _ _ _) = buildCommand name args
data ProcessHasExited
= ProcessDidNotStart String IOException
| ProcessHasExited String ExitCode
deriving (Show, Eq)
instance Exception ProcessHasExited
cancelled . The return type reflects those two cases .
The action receives the ' ProcessHandle ' and stdin ' Handle ' as arguments .
withBackendProcess
:: MonadUnliftIO m
=> Tracer m LauncherLog
-> Command
-> (Maybe Handle -> ProcessHandle -> m a)
-> m (Either ProcessHasExited a)
withBackendProcess tr (Command name args before std_in std_out) action =
liftIO before >> withBackendCreateProcess tr process action
where
process = (proc name args) { std_in, std_out, std_err = std_out }
| A variant of ' withBackendProcess ' which accepts a general ' CreateProcess '
' System . Process.withCreateProcess ' .
1 . If the action finishes or throws an exception , then the process is also
2 . After the process is sent the signal to terminate , this function will
than the 5 second timeout . After the timeout has lapsed , the process will
3 . If the process exits , then the action is cancelled .
' System . Process . Typed.withProcessWait ' ( except for wait timeout ) . The
withBackendCreateProcess
:: forall m a. (MonadUnliftIO m)
=> Tracer m LauncherLog
-> CreateProcess
-> (Maybe Handle -> ProcessHandle -> m a)
-> m (Either ProcessHasExited a)
withBackendCreateProcess tr process action = do
traceWith tr $ MsgLauncherStart name args
exitVar <- newEmptyMVar
res <- fmap join $ tryJust spawnPredicate $ bracket
(createProcess process)
(cleanupProcessAndWait (readMVar exitVar)) $
\(mstdin, _, _, ph) -> do
pid <- maybe "-" (T.pack . show) <$> liftIO (getPid ph)
let tr' = contramap (WithProcessInfo name pid) tr
let tr'' = contramap MsgLauncherWait tr'
traceWith tr' MsgLauncherStarted
interruptibleWaitForProcess tr'' ph (putMVar exitVar)
race (ProcessHasExited name <$> readMVar exitVar) $ bracket_
(traceWith tr' MsgLauncherAction)
(traceWith tr' MsgLauncherActionDone)
(action mstdin ph)
traceWith tr $ MsgLauncherFinish (leftToMaybe res)
pure res
where
spawnPredicate :: IOException -> Maybe ProcessHasExited
spawnPredicate e
| name `isPrefixOf` show e = Just (ProcessDidNotStart name e)
| otherwise = Nothing
cleanupProcessAndWait getExitStatus ps@(_, _, _, ph) = do
traceWith tr MsgLauncherCleanup
liftIO $ cleanupProcess ps
let timeoutSecs = 5
tid <- forkIOWithUnmask $ \unmask -> unmask $ do
threadDelay (timeoutSecs * 1000 * 1000)
traceWith tr (MsgLauncherCleanupTimedOut timeoutSecs)
liftIO (getPid ph >>= mapM_ killProcess)
void getExitStatus `finally` killThread tid
traceWith tr MsgLauncherCleanupFinished
behaviour of the process library on Windows where ' waitForProcess ' seems
interruptibleWaitForProcess
:: Tracer m WaitForProcessLog
-> ProcessHandle
-> (ExitCode -> m ())
-> m ()
interruptibleWaitForProcess tr' ph onExit =
void $ forkIO (waitThread `onException` continue)
where
waitThread = do
traceWith tr' MsgWaitBefore
status <- waitForProcess ph
traceWith tr' (MsgWaitAfter status)
onExit status
continue = do
traceWith tr' MsgWaitCancelled
onExit (ExitFailure 256)
(name, args) = getCreateProcessNameArgs process
getCreateProcessNameArgs :: CreateProcess -> (FilePath, [String])
getCreateProcessNameArgs process = case cmdspec process of
ShellCommand cmd -> (cmd, [])
RawCommand cmd args -> (cmd, args)
data LauncherLog
= MsgLauncherStart String [String]
| WithProcessInfo String Text LaunchedProcessLog
| MsgLauncherCleanup
| MsgLauncherCleanupTimedOut Int
| MsgLauncherCleanupFinished
| MsgLauncherFinish (Maybe ProcessHasExited)
deriving (Show, Eq, Generic)
data LaunchedProcessLog
= MsgLauncherStarted
| MsgLauncherAction
| MsgLauncherActionDone
| MsgLauncherWait WaitForProcessLog
deriving (Show, Eq, Generic)
data WaitForProcessLog
= MsgWaitBefore
| MsgWaitAfter ExitCode
| MsgWaitCancelled
deriving (Show, Eq, Generic)
instance HasPrivacyAnnotation LauncherLog
instance HasSeverityAnnotation LauncherLog where
getSeverityAnnotation = \case
MsgLauncherStart _ _ -> Notice
WithProcessInfo _ _ msg -> getSeverityAnnotation msg
MsgLauncherFinish Nothing -> Debug
MsgLauncherFinish (Just (ProcessDidNotStart _ _)) -> Error
MsgLauncherFinish (Just (ProcessHasExited _ st)) -> case st of
ExitSuccess -> Notice
ExitFailure _ -> Error
MsgLauncherCleanup -> Debug
MsgLauncherCleanupTimedOut _ -> Notice
MsgLauncherCleanupFinished -> Debug
instance HasPrivacyAnnotation LaunchedProcessLog
instance HasSeverityAnnotation LaunchedProcessLog where
getSeverityAnnotation = \case
MsgLauncherStarted -> Info
MsgLauncherWait msg -> getSeverityAnnotation msg
MsgLauncherAction -> Debug
MsgLauncherActionDone -> Notice
instance HasPrivacyAnnotation WaitForProcessLog
instance HasSeverityAnnotation WaitForProcessLog where
getSeverityAnnotation = \case
MsgWaitBefore -> Debug
MsgWaitAfter _ -> Debug
MsgWaitCancelled -> Debug
instance ToText ProcessHasExited where
toText (ProcessHasExited name code) =
"Child process "+|name|+" exited with "+|statusText code|+""
toText (ProcessDidNotStart name _e) =
"Could not start "+|name|+""
instance ToText LauncherLog where
toText ll = fmt $ case ll of
MsgLauncherStart cmd args ->
"Starting process "+|buildCommand cmd args|+""
WithProcessInfo name pid msg ->
"["+|name|+"."+|pid|+"] "+|toText msg|+""
MsgLauncherFinish Nothing ->
"Action finished"
MsgLauncherFinish (Just exited) -> build $ toText exited
MsgLauncherCleanup ->
"Begin process cleanup"
MsgLauncherCleanupTimedOut t ->
"Timed out waiting for process to exit after "+|t|+" seconds"
MsgLauncherCleanupFinished ->
"Process cleanup finished"
instance ToText LaunchedProcessLog where
toText = \case
MsgLauncherStarted -> "Process started"
MsgLauncherAction -> "Running withBackend action"
MsgLauncherWait msg -> toText msg
MsgLauncherActionDone -> "withBackend action done. Terminating child process"
instance ToText WaitForProcessLog where
toText = \case
MsgWaitBefore ->
"Waiting for process to exit"
MsgWaitAfter status -> fmt $
"Process exited with "+|statusText status|+""
MsgWaitCancelled ->
"There was an exception waiting for the process"
statusText :: ExitCode -> Text
statusText ExitSuccess = "success"
statusText (ExitFailure n)
| n >= 0 = fmt $ "code "+||n||+" (failure)"
| otherwise = fmt $ "signal "+||(-n)||+""
|
49b618b759e36dab60e1581af7cbe4f464afb8bd469ea806d49f102bc30b67ba | babashka/babashka | test_util.cljc | (ns lambdaisland.regal.test-util
(:require [lambdaisland.regal :as regal])
#?(:cljs (:require-macros [lambdaisland.regal.test-util :refer [inline-resource]])
:clj (:require [clojure.java.io :as io]
[clojure.test.check.generators :as gen]
[lambdaisland.regal.generator :as regal-gen]
;; BB-TEST-PATCH: Don't have this dependency
#_[com.gfredericks.test.chuck.regexes.charsets :as charsets])))
#?(:clj
(defmacro inline-resource [resource-path]
(read-string (slurp (io/resource resource-path)))))
(defn read-test-cases []
#? (:clj (read-string (slurp (io/resource "lambdaisland/regal/test_cases.edn")))
:cljs (inline-resource "lambdaisland/regal/test_cases.edn")))
(defn flavor-parents [flavor]
(->> flavor
(iterate (comp first (partial parents regal/flavor-hierarchy)))
(take-while identity)))
(defn format-cases [cases]
(for [[form pattern & tests :as case] cases
:let [[props tests] (if (map? (first tests))
[(first tests) (rest tests)]
[{} tests])]]
(with-meta (merge
{:pattern pattern
:form form
:tests tests}
props)
(meta case))))
(defn test-cases
([]
(let [cases (read-test-cases)]
(loop [[id & cases] cases
result []]
(if id
(recur (drop-while vector? cases)
(conj result
{:id id
:cases (format-cases (take-while vector? cases))}))
result)))))
;; BB-TEST-PATCH: bb doesn't have Pattern class
#_(:clj
(do
(defn re2-compile ^com.google.re2j.Pattern [s]
(com.google.re2j.Pattern/compile s))
(defn re2-groups
[^com.google.re2j.Matcher m]
(let [gc (. m (groupCount))]
(if (zero? gc)
(. m (group))
(loop [ret [] c 0]
(if (<= c gc)
(recur (conj ret (. m (group c))) (inc c))
ret)))))
(defn re2-find
([^com.google.re2j.Matcher m]
(when (. m (find))
(re2-groups m)))
([^com.google.re2j.Pattern re s]
(let [m (.matcher re s)]
(re2-find m))))))
;; BB-TEST-PATCH: Uses ns that can't load
#_(:clj
(do
Implementation for generating classes using test.chuck 's charsets .
;; This should eventually be moved to lambdaisland.regal.generator
;; when we have our own charset implementation
(def token->charset-map
(let [whitespace-charset (apply charsets/union
(map (comp charsets/singleton str char) regal/whitespace-char-codes))]
{:any charsets/all-unicode-but-line-terminators
:digit (charsets/predefined-regex-classes \d)
:non-digit (charsets/predefined-regex-classes \D)
:word (charsets/predefined-regex-classes \w)
:non-word (charsets/predefined-regex-classes \W)
:whitespace whitespace-charset
:non-whitespace (charsets/difference
(charsets/intersection charsets/all-unicode
(charsets/range "\u0000" "\uFFFF"))
whitespace-charset)
:newline (charsets/singleton "\n")
:return (charsets/singleton "\r")
:tab (charsets/singleton "\t")
:form-feed (charsets/singleton "\f")
:alert (charsets/singleton "\u0007")
:escape (charsets/singleton "\u001B")
:vertical-whitespace (charsets/predefined-regex-classes \v)
:vertical-tab (charsets/singleton "\u000B")
:null (charsets/singleton "\u0000")}))
(defn token->charset [token]
(or (get token->charset-map token)
(throw (ex-info "Unknown token type" {:token token}))))
(defn class->charset [cls]
(reduce charsets/union*
charsets/empty
(for [c cls]
(try
(cond
(vector? c)
(let [[start end] (map str c)]
(assert (>= 0 (compare start end)))
(charsets/range start end))
(simple-keyword? c)
(token->charset c)
(string? c)
(reduce charsets/union*
(map (comp charsets/singleton str) c))
(char? c)
(charsets/singleton (str c)))
(catch Exception e
(throw (ex-info "Failed to translate class element into charset"
{:cls cls
:element c}
e)))))))
(defn class->gen [[op & elts :as expr]]
(let [cls (class->charset elts)
cls (case op
:not (charsets/difference charsets/all-unicode cls)
:class cls
(throw (ex-info "Unknown character class op" {:op op})))]
(if (nat-int? (charsets/size cls))
(gen/fmap #(charsets/nth cls %) (gen/choose 0 (dec (charsets/size cls))))
(throw (ex-info "Can't generate empty class" {:expr expr})))))
(defmethod regal-gen/-generator :not
[r _opts]
(class->gen r))
(defmethod regal-gen/-generator :class
[r _opts]
(class->gen r))))
#_
(test-cases)
| null | https://raw.githubusercontent.com/babashka/babashka/665ae4dd97535bf72a5ce34a19d624e74e5c4fe8/test-resources/lib_tests/lambdaisland/regal/test_util.cljc | clojure | BB-TEST-PATCH: Don't have this dependency
BB-TEST-PATCH: bb doesn't have Pattern class
BB-TEST-PATCH: Uses ns that can't load
This should eventually be moved to lambdaisland.regal.generator
when we have our own charset implementation | (ns lambdaisland.regal.test-util
(:require [lambdaisland.regal :as regal])
#?(:cljs (:require-macros [lambdaisland.regal.test-util :refer [inline-resource]])
:clj (:require [clojure.java.io :as io]
[clojure.test.check.generators :as gen]
[lambdaisland.regal.generator :as regal-gen]
#_[com.gfredericks.test.chuck.regexes.charsets :as charsets])))
#?(:clj
(defmacro inline-resource [resource-path]
(read-string (slurp (io/resource resource-path)))))
(defn read-test-cases []
#? (:clj (read-string (slurp (io/resource "lambdaisland/regal/test_cases.edn")))
:cljs (inline-resource "lambdaisland/regal/test_cases.edn")))
(defn flavor-parents [flavor]
(->> flavor
(iterate (comp first (partial parents regal/flavor-hierarchy)))
(take-while identity)))
(defn format-cases [cases]
(for [[form pattern & tests :as case] cases
:let [[props tests] (if (map? (first tests))
[(first tests) (rest tests)]
[{} tests])]]
(with-meta (merge
{:pattern pattern
:form form
:tests tests}
props)
(meta case))))
(defn test-cases
([]
(let [cases (read-test-cases)]
(loop [[id & cases] cases
result []]
(if id
(recur (drop-while vector? cases)
(conj result
{:id id
:cases (format-cases (take-while vector? cases))}))
result)))))
#_(:clj
(do
(defn re2-compile ^com.google.re2j.Pattern [s]
(com.google.re2j.Pattern/compile s))
(defn re2-groups
[^com.google.re2j.Matcher m]
(let [gc (. m (groupCount))]
(if (zero? gc)
(. m (group))
(loop [ret [] c 0]
(if (<= c gc)
(recur (conj ret (. m (group c))) (inc c))
ret)))))
(defn re2-find
([^com.google.re2j.Matcher m]
(when (. m (find))
(re2-groups m)))
([^com.google.re2j.Pattern re s]
(let [m (.matcher re s)]
(re2-find m))))))
#_(:clj
(do
Implementation for generating classes using test.chuck 's charsets .
(def token->charset-map
(let [whitespace-charset (apply charsets/union
(map (comp charsets/singleton str char) regal/whitespace-char-codes))]
{:any charsets/all-unicode-but-line-terminators
:digit (charsets/predefined-regex-classes \d)
:non-digit (charsets/predefined-regex-classes \D)
:word (charsets/predefined-regex-classes \w)
:non-word (charsets/predefined-regex-classes \W)
:whitespace whitespace-charset
:non-whitespace (charsets/difference
(charsets/intersection charsets/all-unicode
(charsets/range "\u0000" "\uFFFF"))
whitespace-charset)
:newline (charsets/singleton "\n")
:return (charsets/singleton "\r")
:tab (charsets/singleton "\t")
:form-feed (charsets/singleton "\f")
:alert (charsets/singleton "\u0007")
:escape (charsets/singleton "\u001B")
:vertical-whitespace (charsets/predefined-regex-classes \v)
:vertical-tab (charsets/singleton "\u000B")
:null (charsets/singleton "\u0000")}))
(defn token->charset [token]
(or (get token->charset-map token)
(throw (ex-info "Unknown token type" {:token token}))))
(defn class->charset [cls]
(reduce charsets/union*
charsets/empty
(for [c cls]
(try
(cond
(vector? c)
(let [[start end] (map str c)]
(assert (>= 0 (compare start end)))
(charsets/range start end))
(simple-keyword? c)
(token->charset c)
(string? c)
(reduce charsets/union*
(map (comp charsets/singleton str) c))
(char? c)
(charsets/singleton (str c)))
(catch Exception e
(throw (ex-info "Failed to translate class element into charset"
{:cls cls
:element c}
e)))))))
(defn class->gen [[op & elts :as expr]]
(let [cls (class->charset elts)
cls (case op
:not (charsets/difference charsets/all-unicode cls)
:class cls
(throw (ex-info "Unknown character class op" {:op op})))]
(if (nat-int? (charsets/size cls))
(gen/fmap #(charsets/nth cls %) (gen/choose 0 (dec (charsets/size cls))))
(throw (ex-info "Can't generate empty class" {:expr expr})))))
(defmethod regal-gen/-generator :not
[r _opts]
(class->gen r))
(defmethod regal-gen/-generator :class
[r _opts]
(class->gen r))))
#_
(test-cases)
|
7d3a21fda494ae278da6c56853504eb8d72a7db6adc60c2ff34807a85801dca0 | brownplt/lambda-py | python-tools.rkt | #lang racket/base
(require racket/pretty
"get-structured-python.rkt"
"python-interp.rkt"
"python-phases.rkt"
"python-phase2.rkt"
"python-phase1.rkt"
"python-desugar.rkt"
"python-cps.rkt"
"python-macros.rkt"
"python-lib.rkt"
"run-tests.rkt"
"util.rkt"
"python-evaluator.rkt"
"parser/parser.rkt"
"count-node.rkt"
"parser/python-lexer.rkt"
"parser/test-parser.rkt")
(provide (all-defined-out)
count
set-false
set-pypath
get-pypath)
(define (python-test-runner _ port)
(run-python port))
(define (run-python port)
(interp
(python-lib
(desugar-generators
(desugar
(new-scope-phase
(get-structured-python
((parser) port))))))))
(define (get-surface-syntax port)
(get-structured-python
((parser) port)))
(define (get-lexical-syntax port)
(phase2-without-locals (scope-phase
(get-structured-python
((parser) port)))))
(define (get-phase1-syntax port)
(scope-phase
(get-structured-python
((parser) port))))
(define (get-lexical-syntax-with-locals port)
(new-scope-phase
(get-structured-python
((parser) port))))
(define (desugar-w/lex port)
(desugar
(new-scope-phase
(get-structured-python
((parser) port)))))
(define (desugar-w/lib port)
(python-lib
(desugar
(new-scope-phase
(get-structured-python
((parser) port))))))
(define (desugar-w/macros port)
(desugar
(new-scope-phase
(get-structured-python
((parser) port)))))
(define (get-core-syntax port)
(desugar
(new-scope-phase
(get-structured-python
((parser) port)))))
(define (get-lexer-tokens port)
(lex-all port))
| null | https://raw.githubusercontent.com/brownplt/lambda-py/c3ee39502c8953d36b886e5a203f2eb51d2f495b/base/python-tools.rkt | racket | #lang racket/base
(require racket/pretty
"get-structured-python.rkt"
"python-interp.rkt"
"python-phases.rkt"
"python-phase2.rkt"
"python-phase1.rkt"
"python-desugar.rkt"
"python-cps.rkt"
"python-macros.rkt"
"python-lib.rkt"
"run-tests.rkt"
"util.rkt"
"python-evaluator.rkt"
"parser/parser.rkt"
"count-node.rkt"
"parser/python-lexer.rkt"
"parser/test-parser.rkt")
(provide (all-defined-out)
count
set-false
set-pypath
get-pypath)
(define (python-test-runner _ port)
(run-python port))
(define (run-python port)
(interp
(python-lib
(desugar-generators
(desugar
(new-scope-phase
(get-structured-python
((parser) port))))))))
(define (get-surface-syntax port)
(get-structured-python
((parser) port)))
(define (get-lexical-syntax port)
(phase2-without-locals (scope-phase
(get-structured-python
((parser) port)))))
(define (get-phase1-syntax port)
(scope-phase
(get-structured-python
((parser) port))))
(define (get-lexical-syntax-with-locals port)
(new-scope-phase
(get-structured-python
((parser) port))))
(define (desugar-w/lex port)
(desugar
(new-scope-phase
(get-structured-python
((parser) port)))))
(define (desugar-w/lib port)
(python-lib
(desugar
(new-scope-phase
(get-structured-python
((parser) port))))))
(define (desugar-w/macros port)
(desugar
(new-scope-phase
(get-structured-python
((parser) port)))))
(define (get-core-syntax port)
(desugar
(new-scope-phase
(get-structured-python
((parser) port)))))
(define (get-lexer-tokens port)
(lex-all port))
|
|
b3a0c3fd9a75d56294e8be105b70d49248a2aff12a527d0ee0f9f9e9af788698 | ds-wizard/engine-backend | Reference.hs | module Wizard.Service.KnowledgeModel.Squash.Event.Reference where
import Shared.Model.Event.Reference.ReferenceEvent
import Wizard.Service.KnowledgeModel.Squash.Event.Common
instance SimpleEventSquash EditReferenceEvent where
isSimpleEventSquashApplicable _ = True
isReorderEventSquashApplicable _ _ = False
-- --------------------------------------
isTypeChanged (EditResourcePageReferenceEvent' oldEvent) (EditResourcePageReferenceEvent' newEvent) = False
isTypeChanged (EditURLReferenceEvent' oldEvent) (EditURLReferenceEvent' newEvent) = False
isTypeChanged (EditCrossReferenceEvent' oldEvent) (EditCrossReferenceEvent' newEvent) = False
isTypeChanged _ _ = True
-- --------------------------------------
simpleSquashEvent previousEvent (EditResourcePageReferenceEvent' oldEvent) (EditResourcePageReferenceEvent' newEvent) =
EditResourcePageReferenceEvent' $
EditResourcePageReferenceEvent
{ uuid = newEvent.uuid
, parentUuid = newEvent.parentUuid
, entityUuid = newEvent.entityUuid
, shortUuid = applyValue oldEvent newEvent (.shortUuid)
, annotations = applyValue oldEvent newEvent (.annotations)
, createdAt = newEvent.createdAt
}
simpleSquashEvent previousEvent (EditURLReferenceEvent' oldEvent) (EditURLReferenceEvent' newEvent) =
EditURLReferenceEvent' $
EditURLReferenceEvent
{ uuid = newEvent.uuid
, parentUuid = newEvent.parentUuid
, entityUuid = newEvent.entityUuid
, url = applyValue oldEvent newEvent (.url)
, aLabel = applyValue oldEvent newEvent (.aLabel)
, annotations = applyValue oldEvent newEvent (.annotations)
, createdAt = newEvent.createdAt
}
simpleSquashEvent previousEvent (EditCrossReferenceEvent' oldEvent) (EditCrossReferenceEvent' newEvent) =
EditCrossReferenceEvent' $
EditCrossReferenceEvent
{ uuid = newEvent.uuid
, parentUuid = newEvent.parentUuid
, entityUuid = newEvent.entityUuid
, targetUuid = applyValue oldEvent newEvent (.targetUuid)
, description = applyValue oldEvent newEvent (.description)
, annotations = applyValue oldEvent newEvent (.annotations)
, createdAt = newEvent.createdAt
}
| null | https://raw.githubusercontent.com/ds-wizard/engine-backend/c60bcc649d3d1aefe73d54ba990bcb024c8948eb/engine-wizard/src/Wizard/Service/KnowledgeModel/Squash/Event/Reference.hs | haskell | --------------------------------------
-------------------------------------- | module Wizard.Service.KnowledgeModel.Squash.Event.Reference where
import Shared.Model.Event.Reference.ReferenceEvent
import Wizard.Service.KnowledgeModel.Squash.Event.Common
instance SimpleEventSquash EditReferenceEvent where
isSimpleEventSquashApplicable _ = True
isReorderEventSquashApplicable _ _ = False
isTypeChanged (EditResourcePageReferenceEvent' oldEvent) (EditResourcePageReferenceEvent' newEvent) = False
isTypeChanged (EditURLReferenceEvent' oldEvent) (EditURLReferenceEvent' newEvent) = False
isTypeChanged (EditCrossReferenceEvent' oldEvent) (EditCrossReferenceEvent' newEvent) = False
isTypeChanged _ _ = True
simpleSquashEvent previousEvent (EditResourcePageReferenceEvent' oldEvent) (EditResourcePageReferenceEvent' newEvent) =
EditResourcePageReferenceEvent' $
EditResourcePageReferenceEvent
{ uuid = newEvent.uuid
, parentUuid = newEvent.parentUuid
, entityUuid = newEvent.entityUuid
, shortUuid = applyValue oldEvent newEvent (.shortUuid)
, annotations = applyValue oldEvent newEvent (.annotations)
, createdAt = newEvent.createdAt
}
simpleSquashEvent previousEvent (EditURLReferenceEvent' oldEvent) (EditURLReferenceEvent' newEvent) =
EditURLReferenceEvent' $
EditURLReferenceEvent
{ uuid = newEvent.uuid
, parentUuid = newEvent.parentUuid
, entityUuid = newEvent.entityUuid
, url = applyValue oldEvent newEvent (.url)
, aLabel = applyValue oldEvent newEvent (.aLabel)
, annotations = applyValue oldEvent newEvent (.annotations)
, createdAt = newEvent.createdAt
}
simpleSquashEvent previousEvent (EditCrossReferenceEvent' oldEvent) (EditCrossReferenceEvent' newEvent) =
EditCrossReferenceEvent' $
EditCrossReferenceEvent
{ uuid = newEvent.uuid
, parentUuid = newEvent.parentUuid
, entityUuid = newEvent.entityUuid
, targetUuid = applyValue oldEvent newEvent (.targetUuid)
, description = applyValue oldEvent newEvent (.description)
, annotations = applyValue oldEvent newEvent (.annotations)
, createdAt = newEvent.createdAt
}
|
e19ee85f8514e41bb0c7a1902da59509e24859837599f0ece66f1f6ce79c60fb | mransan/raft-udp | raft_app_srv.mli | (** Generic App server implementation *)
* This module implements the protocol between the App sever and the
Raft server generically . It is parametrized by a module which
simply specify how to decode the Application specific
transaction data .
Raft server generically. It is parametrized by a module which
simply specify how to decode the Application specific
transaction data. *)
(** Module signature to be implemented by each specific application. *)
module type App_sig = sig
type data
(** Data associated with each new log entry *)
val decode : bytes -> data
(** Decoding function *)
type result
(** Result data of the log application in the application *)
val encode : result -> bytes
(** Encoding function *)
end
module Make(App:App_sig) : sig
type log_entry = {
id : string;
index : int;
app_data : App.data;
}
type log_result = {
id : string;
index : int;
app_result : App.result option;
}
type add_log_entries = log_entry list * (log_result list -> unit)
* add - log requests to be processed by the application . The first value
is the list of logs to be processed chronologically by the application ,
while the second value is the function callback for the application to
notify the result of the validation .
is the list of logs to be processed chronologically by the application,
while the second value is the function callback for the application to
notify the result of the validation. *)
val start : Raft_com_conf.t -> int -> add_log_entries Lwt_stream.t
(** [start configuration] returns the continuous stream of request to
be validated by the specific application. *)
end (* Make *)
| null | https://raw.githubusercontent.com/mransan/raft-udp/ffa307fa6d8bdaa3133f3cc66149ac7dfda5fc7c/src/app/raft_app_srv.mli | ocaml | * Generic App server implementation
* Module signature to be implemented by each specific application.
* Data associated with each new log entry
* Decoding function
* Result data of the log application in the application
* Encoding function
* [start configuration] returns the continuous stream of request to
be validated by the specific application.
Make |
* This module implements the protocol between the App sever and the
Raft server generically . It is parametrized by a module which
simply specify how to decode the Application specific
transaction data .
Raft server generically. It is parametrized by a module which
simply specify how to decode the Application specific
transaction data. *)
module type App_sig = sig
type data
val decode : bytes -> data
type result
val encode : result -> bytes
end
module Make(App:App_sig) : sig
type log_entry = {
id : string;
index : int;
app_data : App.data;
}
type log_result = {
id : string;
index : int;
app_result : App.result option;
}
type add_log_entries = log_entry list * (log_result list -> unit)
* add - log requests to be processed by the application . The first value
is the list of logs to be processed chronologically by the application ,
while the second value is the function callback for the application to
notify the result of the validation .
is the list of logs to be processed chronologically by the application,
while the second value is the function callback for the application to
notify the result of the validation. *)
val start : Raft_com_conf.t -> int -> add_log_entries Lwt_stream.t
|
47597ef195d99c69e02391216bb3e6e74df04e140d41989d22ecfa64f80c1f4b | kadena-community/bag-of-holding | Test.hs | module Main ( main ) where
import Data . Aeson ( decode , encode )
-- import Holding
import RIO
import Test.Tasty
import Test . Tasty . HUnit ( Assertion , assertBool , )
---
main :: IO ()
main = defaultMain suites
suites :: TestTree
suites = testGroup "Tests"
[ testGroup "Unit Tests"
[ -- testCase "keysJsonIso" keysJsonIso
]
]
-- keysJsonIso :: Assertion
-- keysJsonIso = do
-- mks <- decode . encode . Hidden <$> keys
-- assertBool "keysJsonIso failed" $ isJust (mks :: Maybe Keys)
| null | https://raw.githubusercontent.com/kadena-community/bag-of-holding/73aa21741b88d8af15e967d93d8cd2ea2605afa1/test/Test.hs | haskell | import Holding
-
testCase "keysJsonIso" keysJsonIso
keysJsonIso :: Assertion
keysJsonIso = do
mks <- decode . encode . Hidden <$> keys
assertBool "keysJsonIso failed" $ isJust (mks :: Maybe Keys) | module Main ( main ) where
import Data . Aeson ( decode , encode )
import RIO
import Test.Tasty
import Test . Tasty . HUnit ( Assertion , assertBool , )
main :: IO ()
main = defaultMain suites
suites :: TestTree
suites = testGroup "Tests"
[ testGroup "Unit Tests"
]
]
|
81faa0dbf435e2259e96dbb40a16573d170feb7307c927bd2744c523d74f729b | philnguyen/soft-contract | tree.rkt | #lang racket
(provide
tree-state
lplaced%
generate-tree
tree-next
hand-out?
)
(require
"../base/untyped.rkt"
"board.rkt"
"state.rkt"
)
(require (only-in "basics.rkt"
shares-available?
shares-order?
))
;; -----------------------------------------------------------------------------
(struct hand-out (
tile
tree))
;; HandOut = (hand-out t st)
denotes that player received tile t and st is the Tree generated from the resulting state
(define (placed-tile p)
(get-field tile p))
(define (placed-hotel p)
(get-field hotel p))
(define atree%
(class object% ;(tree<%>)
(init-field state)
(super-new)
(define/public (nothing-to-place?)
#f)
(define/public (to-state)
(get-field state this))
;(abstract next)
(define/public (next t h d* h* pt)
(error 'not-implemented))
;; template hook pattern: template
(define/public (founding n order-policies)
(unless (shares-order? order-policies)
(error 'atree-founding "Precondition"))
(traversal n order-policies (is-action FOUNDING)))
;; template hook pattern: template
(define/public (merging n order-policies)
(traversal n order-policies (is-action MERGING)))
;; hook
;; how many transitions in THIS tree (up to depth n) satisfy the given predicate
;; Nat [Listof ShareOrder] [Placed -> Nat] -> Nat
(define/public (traversal n order-policies i)
(error 'not-impolementd))
private field : ACTION - > Placed - > { 0,1 }
;; is the alternative a merging action?
(define ((is-action tag) p)
(if (and (placed-hotel p) (eq? (get-field reason p) tag)) 1 0))
;; use pick-tile to hand out a tile; extract the corresponding subtree
[ [ Listof Tile ] - > Tile ] [ ] - > * [ Maybe Tile ] Tree
(define/public (lookup-tile pick-tile lo-handout)
(values #f this))
))
(define state%
(class atree% ;(tree<%>)
(super-new)
(define/override (next . _)
(error 'tree-next "finale state can't transition"))
(define/override (traversal n policies p?) 0)
(define/override (lookup-tile pick-tile lo-handout)
(values #f this))))
(define lplaced%
(class atree% ;(tree<%>)
(super-new)
(init-field
lplaced)
(define/override (nothing-to-place?)
(null? lplaced))
(define/override (next tile hotel decisions shares-to-buy pick-tile)
(define intermediate (send (lookup-purchase tile hotel) purchase decisions shares-to-buy))
(unless (list? intermediate) (error "Expected a HandOut, got a State%"))
(send this lookup-tile pick-tile intermediate))
;; Tile [Maybe Hotel] -> Placed
;; lookup the one and only Placed from lo-placed that represents the action of placing t (& h)
(define/private (lookup-purchase t h)
(or
(for/or
((p lplaced)
#:when (and (equal? (placed-hotel p) h)
(equal? (placed-tile p) t)))
p)
(error 'noplace)))
(define/override (lookup-tile pick-tile lo-hand-out)
(define tile (pick-tile (map hand-out-tile lo-hand-out)))
(define st (for/or
((p lo-hand-out)
#:when (equal? (hand-out-tile p) tile))
(hand-out-tree p)))
(values tile (or st (error 'lookupfailed))))
(define/override (traversal n policies p?)
(if (= n 0)
0
(for/sum
((branch (in-list lplaced)))
(define d* (map (lambda (p)
(list p '()))
(state-players (get-field state/tile branch))))
(define a (send branch acceptable-policies policies))
(+ (p? branch)
;; do not inspect every subtree because share buying does not affect actions
(if (empty? a)
0
(* (length a)
(for/sum
((st (send branch to-trees d* (first a))))
(send st traversal (- n 1) policies p?))))))))))
(define placed%
(class object%
(init-field state tile hotel state/tile reason)
(super-new)
Decisions ShareOrder - > state% or [ ]
;; given merger decisions and a purchase order, generate the next stage from THIS decision point
(define/public (purchase decisions share-order)
;; ---------------------------------------------------------------------------------------------
;; contract checking
(when (eq? MERGING reason)
(define players (state-players state/tile))
(unless (= (length decisions) (length players))
(printf "contract failure: received wrong number of decisions")
;; (pretty-print players)
( pretty - print ( map first decisions ) )
(error 'purchase "done")))
;; ---------------------------------------------------------------------------------------------
(define state/decisions
(if (eq? MERGING reason)
(state-return-shares state/tile decisions (state-board state))
state/tile))
(define state/bought (state-buy-shares state/decisions share-order))
(define available-tiles (state-tiles state/bought))
(if (empty? available-tiles)
(new state% [state state/bought])
(for/list
((tile available-tiles))
(hand-out tile (generate-tree (state-next-turn (state-move-tile state/bought tile)))))))
Decisions ShareOrder - > [ ]
;; given a purchase order, generate list of trees from THIS decision point's purchases
(define/public (to-trees decisions share-order)
(define state-or-hand-out (purchase decisions share-order))
(cond
[(list? state-or-hand-out) (map hand-out-tree state-or-hand-out)]
[else (list state-or-hand-out)]))
[ [ ]
;; filter out those share orders that are acceptable given THIS decision point's state
(define/public (acceptable-policies policies)
(unless (andmap shares-order? policies)
(error 'acceptable-policies "Precondigion"))
(define state state/tile)
(define budget (player-money (state-current-player state)))
(define board (state-board state))
(define shares (state-shares state))
(for/list ((p policies)
#:when (and (shares-available? shares p)
(affordable? board p budget)))
p))))
(define (generate-tree state)
(cond
[(state-final? state)
(new state% [state state])]
[else (define board (state-board state))
(define available-hotels (state-hotels state))
(define lplaced
(for/fold
((lo-placed '()))
((t (player-tiles (state-current-player state))))
(define kind (what-kind-of-spot board t))
(define hotels
(cond
[(eq? kind IMPOSSIBLE) '()]
[(and (eq? FOUNDING kind) (cons? available-hotels)) available-hotels]
[(eq? MERGING kind)
(define-values (acquirers _) (merging-which board t))
acquirers]
[else (list #f)]))
(define new-placements
(for/list
((h (in-list hotels)))
(define state/tile
(if h (state-place-tile state t h) (state-place-tile state t)))
(new placed% [state state][tile t][hotel h][state/tile state/tile][reason kind])))
(append new-placements lo-placed)))
(new lplaced% (state state) (lplaced lplaced))]))
;; ASSUME: current player has enough money to buy the desired shares
(define (tree-next current-tree tile hotel decisions shares-to-buy pick-tile)
(send current-tree next tile hotel decisions shares-to-buy pick-tile))
(define (tree-state t)
(send t to-state))
| null | https://raw.githubusercontent.com/philnguyen/soft-contract/5e07dc2d622ee80b961f4e8aebd04ce950720239/soft-contract/test/gradual-typing-benchmarks/acquire/tree.rkt | racket | -----------------------------------------------------------------------------
HandOut = (hand-out t st)
(tree<%>)
(abstract next)
template hook pattern: template
template hook pattern: template
hook
how many transitions in THIS tree (up to depth n) satisfy the given predicate
Nat [Listof ShareOrder] [Placed -> Nat] -> Nat
is the alternative a merging action?
use pick-tile to hand out a tile; extract the corresponding subtree
(tree<%>)
(tree<%>)
Tile [Maybe Hotel] -> Placed
lookup the one and only Placed from lo-placed that represents the action of placing t (& h)
do not inspect every subtree because share buying does not affect actions
given merger decisions and a purchase order, generate the next stage from THIS decision point
---------------------------------------------------------------------------------------------
contract checking
(pretty-print players)
---------------------------------------------------------------------------------------------
given a purchase order, generate list of trees from THIS decision point's purchases
filter out those share orders that are acceptable given THIS decision point's state
ASSUME: current player has enough money to buy the desired shares | #lang racket
(provide
tree-state
lplaced%
generate-tree
tree-next
hand-out?
)
(require
"../base/untyped.rkt"
"board.rkt"
"state.rkt"
)
(require (only-in "basics.rkt"
shares-available?
shares-order?
))
(struct hand-out (
tile
tree))
denotes that player received tile t and st is the Tree generated from the resulting state
(define (placed-tile p)
(get-field tile p))
(define (placed-hotel p)
(get-field hotel p))
(define atree%
(init-field state)
(super-new)
(define/public (nothing-to-place?)
#f)
(define/public (to-state)
(get-field state this))
(define/public (next t h d* h* pt)
(error 'not-implemented))
(define/public (founding n order-policies)
(unless (shares-order? order-policies)
(error 'atree-founding "Precondition"))
(traversal n order-policies (is-action FOUNDING)))
(define/public (merging n order-policies)
(traversal n order-policies (is-action MERGING)))
(define/public (traversal n order-policies i)
(error 'not-impolementd))
private field : ACTION - > Placed - > { 0,1 }
(define ((is-action tag) p)
(if (and (placed-hotel p) (eq? (get-field reason p) tag)) 1 0))
[ [ Listof Tile ] - > Tile ] [ ] - > * [ Maybe Tile ] Tree
(define/public (lookup-tile pick-tile lo-handout)
(values #f this))
))
(define state%
(super-new)
(define/override (next . _)
(error 'tree-next "finale state can't transition"))
(define/override (traversal n policies p?) 0)
(define/override (lookup-tile pick-tile lo-handout)
(values #f this))))
(define lplaced%
(super-new)
(init-field
lplaced)
(define/override (nothing-to-place?)
(null? lplaced))
(define/override (next tile hotel decisions shares-to-buy pick-tile)
(define intermediate (send (lookup-purchase tile hotel) purchase decisions shares-to-buy))
(unless (list? intermediate) (error "Expected a HandOut, got a State%"))
(send this lookup-tile pick-tile intermediate))
(define/private (lookup-purchase t h)
(or
(for/or
((p lplaced)
#:when (and (equal? (placed-hotel p) h)
(equal? (placed-tile p) t)))
p)
(error 'noplace)))
(define/override (lookup-tile pick-tile lo-hand-out)
(define tile (pick-tile (map hand-out-tile lo-hand-out)))
(define st (for/or
((p lo-hand-out)
#:when (equal? (hand-out-tile p) tile))
(hand-out-tree p)))
(values tile (or st (error 'lookupfailed))))
(define/override (traversal n policies p?)
(if (= n 0)
0
(for/sum
((branch (in-list lplaced)))
(define d* (map (lambda (p)
(list p '()))
(state-players (get-field state/tile branch))))
(define a (send branch acceptable-policies policies))
(+ (p? branch)
(if (empty? a)
0
(* (length a)
(for/sum
((st (send branch to-trees d* (first a))))
(send st traversal (- n 1) policies p?))))))))))
(define placed%
(class object%
(init-field state tile hotel state/tile reason)
(super-new)
Decisions ShareOrder - > state% or [ ]
(define/public (purchase decisions share-order)
(when (eq? MERGING reason)
(define players (state-players state/tile))
(unless (= (length decisions) (length players))
(printf "contract failure: received wrong number of decisions")
( pretty - print ( map first decisions ) )
(error 'purchase "done")))
(define state/decisions
(if (eq? MERGING reason)
(state-return-shares state/tile decisions (state-board state))
state/tile))
(define state/bought (state-buy-shares state/decisions share-order))
(define available-tiles (state-tiles state/bought))
(if (empty? available-tiles)
(new state% [state state/bought])
(for/list
((tile available-tiles))
(hand-out tile (generate-tree (state-next-turn (state-move-tile state/bought tile)))))))
Decisions ShareOrder - > [ ]
(define/public (to-trees decisions share-order)
(define state-or-hand-out (purchase decisions share-order))
(cond
[(list? state-or-hand-out) (map hand-out-tree state-or-hand-out)]
[else (list state-or-hand-out)]))
[ [ ]
(define/public (acceptable-policies policies)
(unless (andmap shares-order? policies)
(error 'acceptable-policies "Precondigion"))
(define state state/tile)
(define budget (player-money (state-current-player state)))
(define board (state-board state))
(define shares (state-shares state))
(for/list ((p policies)
#:when (and (shares-available? shares p)
(affordable? board p budget)))
p))))
(define (generate-tree state)
(cond
[(state-final? state)
(new state% [state state])]
[else (define board (state-board state))
(define available-hotels (state-hotels state))
(define lplaced
(for/fold
((lo-placed '()))
((t (player-tiles (state-current-player state))))
(define kind (what-kind-of-spot board t))
(define hotels
(cond
[(eq? kind IMPOSSIBLE) '()]
[(and (eq? FOUNDING kind) (cons? available-hotels)) available-hotels]
[(eq? MERGING kind)
(define-values (acquirers _) (merging-which board t))
acquirers]
[else (list #f)]))
(define new-placements
(for/list
((h (in-list hotels)))
(define state/tile
(if h (state-place-tile state t h) (state-place-tile state t)))
(new placed% [state state][tile t][hotel h][state/tile state/tile][reason kind])))
(append new-placements lo-placed)))
(new lplaced% (state state) (lplaced lplaced))]))
(define (tree-next current-tree tile hotel decisions shares-to-buy pick-tile)
(send current-tree next tile hotel decisions shares-to-buy pick-tile))
(define (tree-state t)
(send t to-state))
|
6ff1aa543f57a76cd9520e74550f726afbf1276a237ce6de20d100d6859010f8 | jackfirth/lens | syntax.rkt | #lang reprovide
lens/private/syntax/main
| null | https://raw.githubusercontent.com/jackfirth/lens/733db7744921409b69ddc78ae5b23ffaa6b91e37/lens-unstable/unstable/lens/syntax.rkt | racket | #lang reprovide
lens/private/syntax/main
|
|
8b1fd6dde7a1de42dd2de914aca0f46d0e5bd1b0c4b1fed3ae387d6ba904aa3e | geremih/xcljb | common.clj | (ns xcljb.common)
(defmulti read-reply (fn [ext-name opcode _] [ext-name opcode]))
(defmulti read-event (fn [ext-name n _] [ext-name n]))
(defmulti read-error (fn [ext-name n _] [ext-name n]))
(defn padding [n]
(rem (- 4 (rem n 4))
4))
(defn bit-count [n]
(.bitCount (BigInteger/valueOf n)))
| null | https://raw.githubusercontent.com/geremih/xcljb/59e9ff795bf00595a3d46231a7bb4ec976852396/src/xcljb/common.clj | clojure | (ns xcljb.common)
(defmulti read-reply (fn [ext-name opcode _] [ext-name opcode]))
(defmulti read-event (fn [ext-name n _] [ext-name n]))
(defmulti read-error (fn [ext-name n _] [ext-name n]))
(defn padding [n]
(rem (- 4 (rem n 4))
4))
(defn bit-count [n]
(.bitCount (BigInteger/valueOf n)))
|
|
feb44faadc14fef351b1b28cd06d608a0aa24bdf618f5e76bbbbac8f7834075a | sharplispers/ironclad | util.lisp | ;;;; -*- mode: lisp; indent-tabs-mode: nil -*-
;;;; util.lisp -- functions that come in handy in crypto applications
(in-package :crypto)
(declaim (inline byte-array-to-hex-string
hex-string-to-byte-array
ascii-string-to-byte-array))
(defun byte-array-to-hex-string (vector &key (start 0) end (element-type 'base-char))
"Return a string containing the hexadecimal representation of the
subsequence of VECTOR between START and END. ELEMENT-TYPE controls
the element-type of the returned string."
(declare (type (vector (unsigned-byte 8)) vector)
(type fixnum start)
(type (or null fixnum) end)
(optimize (speed 3) (safety 1)))
(let* ((end (or end (length vector)))
(length (- end start))
(hexdigits #.(coerce "0123456789abcdef" 'simple-base-string)))
(loop with string = (ecase element-type
;; so that the compiler optimization can jump in
(base-char (make-string (* length 2)
:element-type 'base-char))
(character (make-string (* length 2)
:element-type 'character)))
for i from start below end
for j from 0 below (* length 2) by 2
do (let ((byte (aref vector i)))
(declare (optimize (safety 0)))
(setf (aref string j)
(aref hexdigits (ldb (byte 4 4) byte))
(aref string (1+ j))
(aref hexdigits (ldb (byte 4 0) byte))))
finally (return string))))
(defun hex-string-to-byte-array (string &key (start 0) (end nil))
"Parses a substring of STRING delimited by START and END of
hexadecimal digits into a byte array."
(declare (type string string))
(let* ((end (or end (length string)))
(length (/ (- end start) 2))
(key (make-array length :element-type '(unsigned-byte 8))))
(declare (type (simple-array (unsigned-byte 8) (*)) key))
(flet ((char-to-digit (char)
(or (position char "0123456789abcdef" :test #'char-equal)
(error 'ironclad-error
:format-control "~A is not a hex digit"
:format-arguments (list char)))))
(loop for i from 0
for j from start below end by 2
do (setf (aref key i)
(+ (* (char-to-digit (char string j)) 16)
(char-to-digit (char string (1+ j)))))
finally (return key)))))
(defun ascii-string-to-byte-array (string &key (start 0) end)
"Convert STRING to a (VECTOR (UNSIGNED-BYTE 8)). It is an error if
STRING contains any character whose CHAR-CODE is greater than 255."
(declare (type string string)
(type fixnum start)
(type (or null fixnum) end)
(optimize (speed 3) (safety 1)))
(let* ((length (length string))
(vec (make-array length :element-type '(unsigned-byte 8)))
(end (or end length)))
(loop for i from start below end do
(let ((byte (char-code (char string i))))
(unless (< byte 256)
(error 'ironclad-error
:format-control "~A is not an ASCII character"
:format-arguments (list (char string i))))
(setf (aref vec i) byte))
finally (return vec))))
(declaim (notinline byte-array-to-hex-string
hex-string-to-byte-array
ascii-string-to-byte-array))
(defun constant-time-equal (data1 data2)
"Returns T if the elements in DATA1 and DATA2 are identical, NIL otherwise.
All the elements of DATA1 and DATA2 are compared to prevent timing attacks."
(declare (type (simple-array (unsigned-byte 8) (*)) data1 data2)
(optimize (speed 3)))
(let ((res (if (= (length data1) (length data2)) 0 1)))
(declare (type (unsigned-byte 8) res))
(loop for d1 across data1
for d2 across data2
do (setf res (logior res (logxor d1 d2))))
(zerop res)))
| null | https://raw.githubusercontent.com/sharplispers/ironclad/6cc4da8554558ee2e89ea38802bbf6d83100d4ea/src/util.lisp | lisp | -*- mode: lisp; indent-tabs-mode: nil -*-
util.lisp -- functions that come in handy in crypto applications
so that the compiler optimization can jump in |
(in-package :crypto)
(declaim (inline byte-array-to-hex-string
hex-string-to-byte-array
ascii-string-to-byte-array))
(defun byte-array-to-hex-string (vector &key (start 0) end (element-type 'base-char))
"Return a string containing the hexadecimal representation of the
subsequence of VECTOR between START and END. ELEMENT-TYPE controls
the element-type of the returned string."
(declare (type (vector (unsigned-byte 8)) vector)
(type fixnum start)
(type (or null fixnum) end)
(optimize (speed 3) (safety 1)))
(let* ((end (or end (length vector)))
(length (- end start))
(hexdigits #.(coerce "0123456789abcdef" 'simple-base-string)))
(loop with string = (ecase element-type
(base-char (make-string (* length 2)
:element-type 'base-char))
(character (make-string (* length 2)
:element-type 'character)))
for i from start below end
for j from 0 below (* length 2) by 2
do (let ((byte (aref vector i)))
(declare (optimize (safety 0)))
(setf (aref string j)
(aref hexdigits (ldb (byte 4 4) byte))
(aref string (1+ j))
(aref hexdigits (ldb (byte 4 0) byte))))
finally (return string))))
(defun hex-string-to-byte-array (string &key (start 0) (end nil))
"Parses a substring of STRING delimited by START and END of
hexadecimal digits into a byte array."
(declare (type string string))
(let* ((end (or end (length string)))
(length (/ (- end start) 2))
(key (make-array length :element-type '(unsigned-byte 8))))
(declare (type (simple-array (unsigned-byte 8) (*)) key))
(flet ((char-to-digit (char)
(or (position char "0123456789abcdef" :test #'char-equal)
(error 'ironclad-error
:format-control "~A is not a hex digit"
:format-arguments (list char)))))
(loop for i from 0
for j from start below end by 2
do (setf (aref key i)
(+ (* (char-to-digit (char string j)) 16)
(char-to-digit (char string (1+ j)))))
finally (return key)))))
(defun ascii-string-to-byte-array (string &key (start 0) end)
"Convert STRING to a (VECTOR (UNSIGNED-BYTE 8)). It is an error if
STRING contains any character whose CHAR-CODE is greater than 255."
(declare (type string string)
(type fixnum start)
(type (or null fixnum) end)
(optimize (speed 3) (safety 1)))
(let* ((length (length string))
(vec (make-array length :element-type '(unsigned-byte 8)))
(end (or end length)))
(loop for i from start below end do
(let ((byte (char-code (char string i))))
(unless (< byte 256)
(error 'ironclad-error
:format-control "~A is not an ASCII character"
:format-arguments (list (char string i))))
(setf (aref vec i) byte))
finally (return vec))))
(declaim (notinline byte-array-to-hex-string
hex-string-to-byte-array
ascii-string-to-byte-array))
(defun constant-time-equal (data1 data2)
"Returns T if the elements in DATA1 and DATA2 are identical, NIL otherwise.
All the elements of DATA1 and DATA2 are compared to prevent timing attacks."
(declare (type (simple-array (unsigned-byte 8) (*)) data1 data2)
(optimize (speed 3)))
(let ((res (if (= (length data1) (length data2)) 0 1)))
(declare (type (unsigned-byte 8) res))
(loop for d1 across data1
for d2 across data2
do (setf res (logior res (logxor d1 d2))))
(zerop res)))
|
e0c5d62abc6ce5ad3aa8474a50e6d081dc77fe240f4333f7d58560f26e477dc9 | rmculpepper/gamble | basic.rkt | Copyright ( c ) 2014
Released under the terms of the 2 - clause BSD license .
;; See the file COPYRIGHT for details.
#lang gamble
(require (for-syntax racket/base)
(except-in rackunit fail))
;; Parameterized by sampler method and tolerance?
(define (make-basic-tests name compute-dist tolerance)
(define-syntax (test stx)
(syntax-case stx ()
[(test expr expected)
#`(test-case (format "~a line ~s: ~.s" name '#,(syntax-line stx) 'expr)
(define actual (compute-dist (lambda () expr)))
(check <=
(discrete-dist-error actual (make-discrete-dist expected))
tolerance))]))
(test (flip 1/2)
'((#t . 1/2)
(#f . 1/2)))
(test (flip 2/3)
'((#t . 2/3)
(#f . 1/3)))
(test (flip 1/10)
'((#t . 1/10)
(#f . 9/10)))
(test (let ()
(define A (flip))
(define B (flip))
(unless (or A B) (fail))
A)
'((#t . 2/3)
(#f . 1/3)))
)
(define ((rejection-compute-dist iters) proc)
(sampler->discrete-dist (rejection-sampler (proc)) iters))
(define ((imp-compute-dist iters) proc)
(sampler->discrete-dist (importance-sampler (proc)) iters))
(define ((mh-compute-dist iters) proc)
(sampler->discrete-dist (mh-sampler (proc)) iters))
(define ((enumerate-compute-dist) proc)
(enumerate (proc)))
(define ((egibbs-compute-dist iters) proc)
(sampler->discrete-dist (mh-sampler #:transition (enumerative-gibbs) (proc)) iters))
(define ((slice-compute-dist iters) proc)
(sampler->discrete-dist (mh-sampler #:transition (slice) (proc)) iters))
(make-basic-tests 'rejection (rejection-compute-dist 1000) 0.05)
(make-basic-tests 'imp-sampl (imp-compute-dist 1000) 0.05)
(make-basic-tests 'mh (mh-compute-dist 1000) 0.10)
(make-basic-tests 'enumerate (enumerate-compute-dist) 1e-6)
(make-basic-tests 'egibbs (egibbs-compute-dist 1000) 0.05)
(make-basic-tests 'slice (slice-compute-dist 1000) 0.05)
| null | https://raw.githubusercontent.com/rmculpepper/gamble/a5231e2eb3dc0721eedc63a77ee6d9333846323e/gamble/tests/basic.rkt | racket | See the file COPYRIGHT for details.
Parameterized by sampler method and tolerance? | Copyright ( c ) 2014
Released under the terms of the 2 - clause BSD license .
#lang gamble
(require (for-syntax racket/base)
(except-in rackunit fail))
(define (make-basic-tests name compute-dist tolerance)
(define-syntax (test stx)
(syntax-case stx ()
[(test expr expected)
#`(test-case (format "~a line ~s: ~.s" name '#,(syntax-line stx) 'expr)
(define actual (compute-dist (lambda () expr)))
(check <=
(discrete-dist-error actual (make-discrete-dist expected))
tolerance))]))
(test (flip 1/2)
'((#t . 1/2)
(#f . 1/2)))
(test (flip 2/3)
'((#t . 2/3)
(#f . 1/3)))
(test (flip 1/10)
'((#t . 1/10)
(#f . 9/10)))
(test (let ()
(define A (flip))
(define B (flip))
(unless (or A B) (fail))
A)
'((#t . 2/3)
(#f . 1/3)))
)
(define ((rejection-compute-dist iters) proc)
(sampler->discrete-dist (rejection-sampler (proc)) iters))
(define ((imp-compute-dist iters) proc)
(sampler->discrete-dist (importance-sampler (proc)) iters))
(define ((mh-compute-dist iters) proc)
(sampler->discrete-dist (mh-sampler (proc)) iters))
(define ((enumerate-compute-dist) proc)
(enumerate (proc)))
(define ((egibbs-compute-dist iters) proc)
(sampler->discrete-dist (mh-sampler #:transition (enumerative-gibbs) (proc)) iters))
(define ((slice-compute-dist iters) proc)
(sampler->discrete-dist (mh-sampler #:transition (slice) (proc)) iters))
(make-basic-tests 'rejection (rejection-compute-dist 1000) 0.05)
(make-basic-tests 'imp-sampl (imp-compute-dist 1000) 0.05)
(make-basic-tests 'mh (mh-compute-dist 1000) 0.10)
(make-basic-tests 'enumerate (enumerate-compute-dist) 1e-6)
(make-basic-tests 'egibbs (egibbs-compute-dist 1000) 0.05)
(make-basic-tests 'slice (slice-compute-dist 1000) 0.05)
|
f27b60bf5070d18cabe8bfa37945a51c4b6c55d3e4830a7eb8a7153317c9a21a | c4-project/c4f | scope.ml | This file is part of c4f .
Copyright ( c ) 2018 - 2022 C4 Project
c4 t itself is licensed under the MIT License . See the LICENSE file in the
project root for more information .
Parts of c4 t are based on code from the Herdtools7 project
( ) : see the LICENSE.herd file in the
project root for more information .
Copyright (c) 2018-2022 C4 Project
c4t itself is licensed under the MIT License. See the LICENSE file in the
project root for more information.
Parts of c4t are based on code from the Herdtools7 project
() : see the LICENSE.herd file in the
project root for more information. *)
module Src = C4f_common
let%test_module "reduce" =
( module struct
let test (l_scope : Src.Scope.t) (r_scope : Src.Scope.t) : unit =
let l = (l_scope, "left") in
let r = (r_scope, "right") in
Stdio.print_endline (snd (Src.Scope.reduce l r))
let%expect_test "both global scope" =
Src.Scope.(test Global Global) ;
[%expect {| left |}]
let%expect_test "both local scope (same thread ID)" =
Src.Scope.(test (Local 42) (Local 42)) ;
[%expect {| left |}]
(* Local scope with different thread IDs isn't well-defined, so we don't
test it. *)
let%expect_test "left global, right local" =
Src.Scope.(test Global (Local 27)) ;
[%expect {| right |}]
let%expect_test "left local, right global" =
Src.Scope.(test (Local 53) Global) ;
[%expect {| left |}]
end )
| null | https://raw.githubusercontent.com/c4-project/c4f/8939477732861789abc807c8c1532a302b2848a5/lib/common/test/scope.ml | ocaml | Local scope with different thread IDs isn't well-defined, so we don't
test it. | This file is part of c4f .
Copyright ( c ) 2018 - 2022 C4 Project
c4 t itself is licensed under the MIT License . See the LICENSE file in the
project root for more information .
Parts of c4 t are based on code from the Herdtools7 project
( ) : see the LICENSE.herd file in the
project root for more information .
Copyright (c) 2018-2022 C4 Project
c4t itself is licensed under the MIT License. See the LICENSE file in the
project root for more information.
Parts of c4t are based on code from the Herdtools7 project
() : see the LICENSE.herd file in the
project root for more information. *)
module Src = C4f_common
let%test_module "reduce" =
( module struct
let test (l_scope : Src.Scope.t) (r_scope : Src.Scope.t) : unit =
let l = (l_scope, "left") in
let r = (r_scope, "right") in
Stdio.print_endline (snd (Src.Scope.reduce l r))
let%expect_test "both global scope" =
Src.Scope.(test Global Global) ;
[%expect {| left |}]
let%expect_test "both local scope (same thread ID)" =
Src.Scope.(test (Local 42) (Local 42)) ;
[%expect {| left |}]
let%expect_test "left global, right local" =
Src.Scope.(test Global (Local 27)) ;
[%expect {| right |}]
let%expect_test "left local, right global" =
Src.Scope.(test (Local 53) Global) ;
[%expect {| left |}]
end )
|
a11dbee4e6b8766ea5109ca4f2c1b98910d03e422401cb16b495c444b0529d61 | hoytech/antiweb | encode.lisp | This file is part of yason , a Common Lisp JSON parser / encoder
;;
Copyright ( c ) 2008
;; All rights reserved.
;;
;; Please see the file LICENSE in the distribution.
(in-package :yason)
(defvar *json-output*)
(defmacro with-standard-output-to ((stream) &body body)
`(let ((*standard-output* ,stream))
,@body))
(defgeneric encode (object &optional stream)
(:documentation "Encode OBJECT to STREAM in JSON format. May be
specialized by applications to perform specific rendering. STREAM
defaults to *STANDARD-OUTPUT*.")
(:method ((object string) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\")
(loop
for char across object
do (case char
((#\\ #\" #\/)
(princ #\\) (princ char))
(#\Backspace
(princ #\\) (princ #\b))
(#\Page
(princ #\\) (princ #\f))
(#\Newline
(princ #\\) (princ #\n))
(#\Return
(princ #\\) (princ #\r))
(#\Tab
(princ #\\) (princ #\t))
(t
(princ char))))
(princ #\"))
object)
(:method ((object rational) &optional (stream *standard-output*))
(encode (float object) stream)
object)
(:method ((object integer) &optional (stream *standard-output*))
(princ object stream))
(:method ((object hash-table) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\{)
(let (printed)
(maphash (lambda (key value)
(if printed
(princ #\,)
(setf printed t))
(encode key stream)
(princ #\:)
(encode value stream))
object))
(princ #\}))
object)
(:method ((object vector) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\[)
(let (printed)
(loop
for value across object
do
(when printed
(princ #\,))
(setf printed t)
(encode value stream)))
(princ #\]))
object)
(:method ((object list) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\[)
(let (printed)
(dolist (value object)
(if printed
(princ #\,)
(setf printed t))
(encode value stream)))
(princ #\]))
object)
(:method ((object (eql 'true)) &optional (stream *standard-output*))
(princ "true" stream)
object)
(:method ((object (eql 'false)) &optional (stream *standard-output*))
(princ "false" stream)
object)
(:method ((object (eql 'null)) &optional (stream *standard-output*))
(princ "null" stream)
object)
(:method ((object (eql t)) &optional (stream *standard-output*))
(princ "true" stream)
object)
(:method ((object (eql nil)) &optional (stream *standard-output*))
(princ "null" stream)
object))
(defclass json-output-stream ()
((output-stream :reader output-stream
:initarg :output-stream)
(stack :accessor stack
:initform nil))
(:documentation "Objects of this class capture the state of a JSON stream encoder."))
(defun next-aggregate-element ()
(if (car (stack *json-output*))
(princ (car (stack *json-output*)) (output-stream *json-output*))
(setf (car (stack *json-output*)) #\,)))
(defmacro with-output ((stream) &body body)
"Set up a JSON streaming encoder context on STREAM, then evaluate BODY."
`(let ((*json-output* (make-instance 'json-output-stream :output-stream ,stream)))
,@body))
(defmacro with-output-to-string* (() &body body)
"Set up a JSON streaming encoder context, then evaluate BODY.
Return a string with the generated JSON output."
`(with-output-to-string (s)
(with-output (s)
,@body)))
(define-condition no-json-output-context (error)
()
(:report "No JSON output context is active")
(:documentation "This condition is signalled when one of the stream
encoding function is used outside the dynamic context of a
WITH-OUTPUT or WITH-OUTPUT-TO-STRING* body."))
(defmacro with-aggregate ((begin-char end-char) &body body)
`(progn
(unless (boundp '*json-output*)
(error 'no-json-output-context))
(when (stack *json-output*)
(next-aggregate-element))
(princ ,begin-char (output-stream *json-output*))
(push nil (stack *json-output*))
(prog1
(progn ,@body)
(pop (stack *json-output*))
(princ ,end-char (output-stream *json-output*)))))
(defmacro with-array (() &body body)
"Open a JSON array, then run BODY. Inside the body,
ENCODE-ARRAY-ELEMENT must be called to encode elements to the opened
array. Must be called within an existing JSON encoder context, see
WITH-OUTPUT and WITH-OUTPUT-TO-STRING*."
`(with-aggregate (#\[ #\]) ,@body))
(defmacro with-object (() &body body)
"Open a JSON object, then run BODY. Inside the body,
ENCODE-OBJECT-ELEMENT or WITH-OBJECT-ELEMENT must be called to encode
elements to the object. Must be called within an existing JSON
encoder context, see WITH-OUTPUT and WITH-OUTPUT-TO-STRING*."
`(with-aggregate (#\{ #\}) ,@body))
(defun encode-array-element (object)
"Encode OBJECT as next array element to the last JSON array opened
with WITH-ARRAY in the dynamic context. OBJECT is encoded using the
ENCODE generic function, so it must be of a type for which an ENCODE
method is defined."
(next-aggregate-element)
(encode object (output-stream *json-output*)))
(defun encode-object-element (key value)
"Encode KEY and VALUE as object element to the last JSON object
opened with WITH-OBJECT in the dynamic context. KEY and VALUE are
encoded using the ENCODE generic function, so they both must be of a
type for which an ENCODE method is defined."
(next-aggregate-element)
(encode key (output-stream *json-output*))
(princ #\: (output-stream *json-output*))
(encode value (output-stream *json-output*))
value)
(defmacro with-object-element ((key) &body body)
"Open a new encoding context to encode a JSON object element. KEY
is the key of the element. The value will be whatever BODY
serializes to the current JSON output context using one of the
stream encoding functions. This can be used to stream out nested
object structures."
`(progn
(next-aggregate-element)
(encode ,key (output-stream *json-output*))
(setf (car (stack *json-output*)) #\:)
(unwind-protect
(progn ,@body)
(setf (car (stack *json-output*)) #\,))))
| null | https://raw.githubusercontent.com/hoytech/antiweb/53c38f78ea01f04f6d1a1ecdca5c012e7a9ae4bb/bundled/yason/encode.lisp | lisp |
All rights reserved.
Please see the file LICENSE in the distribution. | This file is part of yason , a Common Lisp JSON parser / encoder
Copyright ( c ) 2008
(in-package :yason)
(defvar *json-output*)
(defmacro with-standard-output-to ((stream) &body body)
`(let ((*standard-output* ,stream))
,@body))
(defgeneric encode (object &optional stream)
(:documentation "Encode OBJECT to STREAM in JSON format. May be
specialized by applications to perform specific rendering. STREAM
defaults to *STANDARD-OUTPUT*.")
(:method ((object string) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\")
(loop
for char across object
do (case char
((#\\ #\" #\/)
(princ #\\) (princ char))
(#\Backspace
(princ #\\) (princ #\b))
(#\Page
(princ #\\) (princ #\f))
(#\Newline
(princ #\\) (princ #\n))
(#\Return
(princ #\\) (princ #\r))
(#\Tab
(princ #\\) (princ #\t))
(t
(princ char))))
(princ #\"))
object)
(:method ((object rational) &optional (stream *standard-output*))
(encode (float object) stream)
object)
(:method ((object integer) &optional (stream *standard-output*))
(princ object stream))
(:method ((object hash-table) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\{)
(let (printed)
(maphash (lambda (key value)
(if printed
(princ #\,)
(setf printed t))
(encode key stream)
(princ #\:)
(encode value stream))
object))
(princ #\}))
object)
(:method ((object vector) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\[)
(let (printed)
(loop
for value across object
do
(when printed
(princ #\,))
(setf printed t)
(encode value stream)))
(princ #\]))
object)
(:method ((object list) &optional (stream *standard-output*))
(with-standard-output-to (stream)
(princ #\[)
(let (printed)
(dolist (value object)
(if printed
(princ #\,)
(setf printed t))
(encode value stream)))
(princ #\]))
object)
(:method ((object (eql 'true)) &optional (stream *standard-output*))
(princ "true" stream)
object)
(:method ((object (eql 'false)) &optional (stream *standard-output*))
(princ "false" stream)
object)
(:method ((object (eql 'null)) &optional (stream *standard-output*))
(princ "null" stream)
object)
(:method ((object (eql t)) &optional (stream *standard-output*))
(princ "true" stream)
object)
(:method ((object (eql nil)) &optional (stream *standard-output*))
(princ "null" stream)
object))
(defclass json-output-stream ()
((output-stream :reader output-stream
:initarg :output-stream)
(stack :accessor stack
:initform nil))
(:documentation "Objects of this class capture the state of a JSON stream encoder."))
(defun next-aggregate-element ()
(if (car (stack *json-output*))
(princ (car (stack *json-output*)) (output-stream *json-output*))
(setf (car (stack *json-output*)) #\,)))
(defmacro with-output ((stream) &body body)
"Set up a JSON streaming encoder context on STREAM, then evaluate BODY."
`(let ((*json-output* (make-instance 'json-output-stream :output-stream ,stream)))
,@body))
(defmacro with-output-to-string* (() &body body)
"Set up a JSON streaming encoder context, then evaluate BODY.
Return a string with the generated JSON output."
`(with-output-to-string (s)
(with-output (s)
,@body)))
(define-condition no-json-output-context (error)
()
(:report "No JSON output context is active")
(:documentation "This condition is signalled when one of the stream
encoding function is used outside the dynamic context of a
WITH-OUTPUT or WITH-OUTPUT-TO-STRING* body."))
(defmacro with-aggregate ((begin-char end-char) &body body)
`(progn
(unless (boundp '*json-output*)
(error 'no-json-output-context))
(when (stack *json-output*)
(next-aggregate-element))
(princ ,begin-char (output-stream *json-output*))
(push nil (stack *json-output*))
(prog1
(progn ,@body)
(pop (stack *json-output*))
(princ ,end-char (output-stream *json-output*)))))
(defmacro with-array (() &body body)
"Open a JSON array, then run BODY. Inside the body,
ENCODE-ARRAY-ELEMENT must be called to encode elements to the opened
array. Must be called within an existing JSON encoder context, see
WITH-OUTPUT and WITH-OUTPUT-TO-STRING*."
`(with-aggregate (#\[ #\]) ,@body))
(defmacro with-object (() &body body)
"Open a JSON object, then run BODY. Inside the body,
ENCODE-OBJECT-ELEMENT or WITH-OBJECT-ELEMENT must be called to encode
elements to the object. Must be called within an existing JSON
encoder context, see WITH-OUTPUT and WITH-OUTPUT-TO-STRING*."
`(with-aggregate (#\{ #\}) ,@body))
(defun encode-array-element (object)
"Encode OBJECT as next array element to the last JSON array opened
with WITH-ARRAY in the dynamic context. OBJECT is encoded using the
ENCODE generic function, so it must be of a type for which an ENCODE
method is defined."
(next-aggregate-element)
(encode object (output-stream *json-output*)))
(defun encode-object-element (key value)
"Encode KEY and VALUE as object element to the last JSON object
opened with WITH-OBJECT in the dynamic context. KEY and VALUE are
encoded using the ENCODE generic function, so they both must be of a
type for which an ENCODE method is defined."
(next-aggregate-element)
(encode key (output-stream *json-output*))
(princ #\: (output-stream *json-output*))
(encode value (output-stream *json-output*))
value)
(defmacro with-object-element ((key) &body body)
"Open a new encoding context to encode a JSON object element. KEY
is the key of the element. The value will be whatever BODY
serializes to the current JSON output context using one of the
stream encoding functions. This can be used to stream out nested
object structures."
`(progn
(next-aggregate-element)
(encode ,key (output-stream *json-output*))
(setf (car (stack *json-output*)) #\:)
(unwind-protect
(progn ,@body)
(setf (car (stack *json-output*)) #\,))))
|
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.