code
stringlengths 5
1.03M
| repo_name
stringlengths 5
90
| path
stringlengths 4
158
| license
stringclasses 15
values | size
int64 5
1.03M
| n_ast_errors
int64 0
53.9k
| ast_max_depth
int64 2
4.17k
| n_whitespaces
int64 0
365k
| n_ast_nodes
int64 3
317k
| n_ast_terminals
int64 1
171k
| n_ast_nonterminals
int64 1
146k
| loc
int64 -1
37.3k
| cycloplexity
int64 -1
1.31k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|
-- Implements Jump Point Search
module JPS where
import qualified Data.Map.Strict as Map
import Data.Word
import qualified Data.Vector.Unboxed as U
import Data.Vector.Unboxed ((!))
import qualified Data.Set as Set
import qualified Data.List as L
import qualified Data.IntPSQ as PSQ
import Grid
import Astar
import Debug.Trace
-- | Run A* with the JPS expansion strategy from @start@ to @finish@.
-- Returns the unwound cell path (empty when no path exists) together
-- with the visited-node map from the search, for inspection.
findPathJPS :: Grid -> Int -> Int -> ([Int], Map.Map Int Int)
findPathJPS g start finish =
  case result of
    Nothing -> ([], seen)
    Just sn -> (unwindJPS pf' sn, seen)
  where
    pf0           = newPathfinding g start finish
    root          = SearchNode Nothing start C 0
    (result, pf') = astar expandJPS heuristicJPS pf0 root
    seen          = visited pf'
-- | Reconstruct the cell path from a goal 'SearchNode' by following
-- parent links.  JPS nodes are jump points, so the cells between a
-- node and its parent are filled in with 'unwindBetween'.
-- (The node's own direction and depth fields are unused here.)
unwindJPS :: Pathfinding -> SearchNode -> [Int]
unwindJPS pf (SearchNode prev i dir depth) =
  case prev of
    -- Start node: the parent chain ends here.
    Nothing -> [i]
    -- Walk from this node back towards its parent (using the opposite
    -- of the parent's travel direction), then recurse up the chain.
    Just sn -> let (SearchNode _ p backdir _) = sn in
      (unwindBetween (dims $ grid pf) (opposite backdir) i p) ++ unwindJPS pf sn
-- | List the cells from @from@ (inclusive) up to but excluding @to@,
-- stepping one cell at a time in direction @dir@.  A 'C' (no-move)
-- direction yields just the starting cell; stepping out of bounds is
-- a hard error, since it indicates a corrupt path.
unwindBetween :: GridDims -> Direction -> Int -> Int -> [Int]
unwindBetween d dir from to
  | from == to              = []
  | dir == C                = [from]
  | not (isInBounds d from) = error "JPS path unwinding went out of bounds!"
  | otherwise               = from : unwindBetween d dir (moveInDirection d dir from) to
-- Gets direction of dest relative to src; 'C' when dest is not one of
-- src's eight neighbours.
pairToDirection :: GridDims -> Int -> Int -> Direction
pairToDirection d src dest =
    maybe C snd (L.find ((== dest) . fst) neighbours)
  where
    -- same probe order as the original guard chain: N, NE, E, SE, S, SW, W, NW
    neighbours =
      [ (n d src, N), (ne d src, NE), (e d src, E), (se d src, SE)
      , (s d src, S), (sw d src, SW), (w d src, W), (nw d src, NW) ]
-- Figures out which direction to scan in and proceeds.
expandJPS :: ExpandFn
expandJPS pf sn = let (SearchNode prev curr dir depth) = sn in
  case dir of
    -- The start node carries no travel direction, so fall back to
    -- ordinary A* neighbour expansion.
    C -> normalExpand pf sn
    -- Otherwise jump-scan along the node's direction of travel.
    _ -> scan pf sn
-- | Dispatch a jump scan: cardinal directions use the straight scan,
-- diagonals use the diagonal scan.
scan :: Pathfinding -> SearchNode -> [SearchNode]
scan pf sn =
  let SearchNode _ cell travel depth = sn
      scanner = if travel `elem` [N, E, S, W] then scanStraight else scanDiag
  in scanner pf sn cell depth
-- | JPS reuses the heuristic of the plain A* search unchanged.
heuristicJPS :: HeuristicFn
heuristicJPS = normalHeuristic
-- Diagonal scan does not need to check for forced neighbors -- the straight scans that it spawns
-- will catch them! COOL!!!
-- | Scan diagonally from cell @i@: at each step run the two 45-degree
-- straight scans; if either produces successors, stop here and also
-- emit a node continuing the diagonal, otherwise keep stepping.
scanDiag :: Pathfinding -> SearchNode -> Int -> Int -> [SearchNode]
scanDiag pf sn i dep
  | (isBlocked g i) = [ ]
  | (isFinish pf i) = [(SearchNode (Just sn) i C 0)]
  | otherwise = if null stScans
      -- nothing of interest on either straight scan: keep moving diagonally
      then (scanDiag pf sn (moveInDirection d diag i) (dep + 1))
      else stScans ++
           [(SearchNode (Just sn) (moveInDirection d diag i) diag (dep + 1))]
  where
    g = grid pf
    d = dims g
    -- direction of travel of the node being expanded
    diag = dir sn
    -- the two 45-degree (cardinal) components of the diagonal
    (f1, f2) = fortyfives diag
    stScans = (scanStraight pf (SearchNode (Just sn) i f1 dep) i dep) ++
              (scanStraight pf (SearchNode (Just sn) i f2 dep) i dep)
-- | Scan in a cardinal direction starting at cell @i@.  Stops with []
-- on a blocked cell, with a finish node when the goal is reached, or
-- with the successors generated at the first cell that has forced
-- neighbours; otherwise keeps stepping in the travel direction.
scanStraight :: Pathfinding -> SearchNode -> Int -> Int -> [SearchNode]
scanStraight pf sn i dep
  | (isBlocked g i) = [ ]
  | (isFinish pf i) = [(SearchNode (Just sn) i C 0)]
  | hasForcedNeighborsStraight g forward i = forcedNeighborsStraight g sn i forward dep
  | otherwise = scanStraight pf sn
      (moveInDirection d forward i) (dep + 1)
  where
    g = grid pf
    d = dims g
    -- 'forward' is the node's direction of travel; prev/st are unused here
    (SearchNode prev st forward _) = sn
-- | True when either cell perpendicular to the travel direction is
-- blocked, i.e. this cell may give rise to forced neighbours.
hasForcedNeighborsStraight :: Grid -> Direction -> Int -> Bool
hasForcedNeighborsStraight g forward i =
    any (isBlocked g . sideCell) [side1, side2]
  where
    (side1, side2) = nineties forward
    sideCell perp  = moveInDirection (dims g) perp i
-- | Successor nodes at a cell with forced neighbours: continue
-- straight ahead, plus a diagonal successor past each blocked
-- perpendicular cell.
forcedNeighborsStraight :: Grid -> SearchNode -> Int -> Direction -> Int -> [SearchNode]
forcedNeighborsStraight g sn i forward dep =
    concat [ createSN g sn i forward dep
           , forcedVia perp1
           , forcedVia perp2
           ]
  where
    (perp1, perp2) = nineties forward
    forcedVia p    = createSNIfBlocked g sn i p (between forward p) dep
-- | Emit a successor through @targetdir@ only when the cell in
-- @blockdir@ is blocked and the target cell itself is free (the
-- forced-neighbour condition).  An intermediate node at @i@ is kept
-- as the parent so path unwinding stays contiguous.
createSNIfBlocked :: Grid -> SearchNode -> Int -> Direction -> Direction -> Int -> [SearchNode]
createSNIfBlocked g prev i blockdir targetdir depth
  | forced    = [SearchNode (Just here) targetsq targetdir (depth + 1)]
  | otherwise = []
  where
    d        = dims g
    targetsq = moveInDirection d targetdir i
    forced   = isBlocked g (moveInDirection d blockdir i)
               && not (isBlocked g targetsq)
    here     = SearchNode (Just prev) i targetdir depth
-- | Build the successor one step from @i@ in @dir@, or nothing when
-- that step is blocked.  An intermediate node at @i@ becomes the
-- successor's parent.
createSN :: Grid -> SearchNode -> Int -> Direction -> Int -> [SearchNode]
createSN g prev i dir depth
  | isBlocked g next = []
  | otherwise        = [SearchNode (Just here) next dir (depth + 1)]
  where
    next = moveInDirection (dims g) dir i
    here = SearchNode (Just prev) i dir depth
| hacoo/haskell-jps | pathfinding/JPS.hs | mit | 5,039 | 0 | 15 | 1,590 | 1,866 | 952 | 914 | 107 | 2 |
module Main where
import AST
import Lexer
import Parse
import ParseAST
import Verify
import System.Environment
-- | Lex and parse the given source file, printing the parse result.
build :: FilePath -> IO ()
build file =
  readFile file >>= print . run parseModule . lexer file
-- | Entry point: dispatch on the command-line arguments.
main :: IO ()
main = getArgs >>= dispatch
  where
    dispatch ["version"]     = putStrLn "nickel 0.0.1 (November 2015)"
    dispatch ["help"]        = putStrLn "TODO"
    dispatch ["build", file] = build file
    dispatch _               = putStrLn "Usage: version / help / build [file]"
| Nathan-Fenner/New-Nickel | Main.hs | mit | 459 | 0 | 10 | 94 | 152 | 77 | 75 | 19 | 4 |
module Light.Camera.PerspectiveCamera
( PerspectiveCamera, perspectiveCamera, perspectiveVerticalFOV
)
where
import Light.Camera
import Light.Film
import Light.Geometry
-- | A pinhole camera: its placement transform, the film it exposes,
-- and the vertical field of view (in radians, judging by the direct
-- use of 'tan' in 'cameraRay' -- confirm against callers).
data PerspectiveCamera = PerspectiveCamera { perspectiveTransform   :: Transform
                                           , perspectiveFilm        :: Film
                                           , perspectiveVerticalFOV :: Double
                                           }
                         deriving (Show)
-- | Construct a perspective camera with the identity placement
-- transform (i.e. sitting at the origin).
perspectiveCamera :: Film -> Double -> PerspectiveCamera
perspectiveCamera film fovY = PerspectiveCamera identityTransform film fovY
instance Camera PerspectiveCamera where
  cameraTransform = perspectiveTransform
  cameraFilm = perspectiveFilm
  -- Map a film sample (fx, fy) to a ray: build the ray in camera
  -- space (origin through the virtual image plane at z = 1), then
  -- carry it into world space via the inverse camera transform.
  cameraRay (PerspectiveCamera t f fovY) (fx, fy) = transform (inverse t) (Ray o d)
    where o = originPoint
          d = normalizeV $ Vector (x*sx) (y*sy) 1
          -- horizontal scale folds in the film's aspect ratio
          sx = tan (fovY / 2) * (fromIntegral fw / fromIntegral fh)
          sy = tan (fovY / 2)
          -- film coordinates remapped to [-1, 1]
          x = (fx / fromIntegral fw) * 2 - 1
          y = (fy / fromIntegral fh) * 2 - 1
          (fw, fh) = filmDimensions f
instance Transformable PerspectiveCamera where
  -- Only the placement transform changes; film and FOV carry over.
  transform t' cam =
    cam { perspectiveTransform = compose t' (perspectiveTransform cam) }
| jtdubs/Light | src/Light/Camera/PerspectiveCamera.hs | mit | 1,240 | 0 | 12 | 387 | 339 | 183 | 156 | 24 | 1 |
-- A circuit that is constructive in the initial state but not in the second.
module T where
import Tests.Basis
-- The circuit under test: 'boot' comes from a delay seeded with
-- falseA/trueA, and drives a combinational and/not loop via combLoop.
-- Per test_correct below, the output is 'true' in the initial state
-- and bottom (non-constructive) afterwards.
c = proc () ->
  do boot <- delayAC falseA trueA -< ()
     combLoop (dupA <<< notA <<< andA) -< boot
-- Check a relatively small number of sequence lengths.
-- (Idiom fix: 'all prop' replaces the equivalent 'and (map prop ...)'.)
test_correct =
  let prop n =
        -- expected output: constructive 'true' in the initial state,
        -- bottom from the second state onwards
        let lhs = repeat ()
            rhs = true : repeat bottom
        in take n (simulate c lhs) == take n rhs
  in all prop [0..100]
-- The circuit as a whole is expected to be reported non-constructive.
test_constructive = isNothing (isConstructive c)
-- Sanity check: netlist generation runs on the circuit.
ok_netlist = runNL c
| peteg/ADHOC | Tests/02_SequentialCircuits/025_nonconstructive_reachable_state.hs | gpl-2.0 | 531 | 1 | 14 | 146 | 171 | 84 | 87 | -1 | -1 |
module Handler.Semesters where
import Import
import Control.Types
import Data.Time (getCurrentTime)
import Autolib.Util.Sort (sortBy)
-- | List all semesters of a school, current ones sorted to the front,
-- showing only those the current user may view or edit.
getSemestersR :: SchuleId -> Handler Html
getSemestersR schuleId = do
  semesters <- runDB $ selectList [SemesterSchuleId ==. schuleId] []
  zeit <- liftIO $ getCurrentTime
  -- Key False (== Current) sorts ahead of True -- assumes Autolib's
  -- sortBy is an ascending sort-on-key; confirm.
  let semestersSortiert = sortBy (\ s -> zeitStatus (semesterVon $ entityVal s) (semesterBis $ entityVal s) zeit /= Current) semesters
  mid <- maybeAuthId
  -- Authorization filtering: each entry yields zero or one tuples.
  semestersAutorisiert' <- mapM (autorisiertSemester mid) semestersSortiert
  let semestersAutorisiert = concat semestersAutorisiert'
  defaultLayout $ do
    $(widgetFile "semesters")
-- | For one semester entity, build the display tuple: the value, a
-- link to its lectures when viewable, and a link to its edit page
-- when editable.  Yields the empty list when the user may do neither.
autorisiertSemester :: Maybe (AuthId Autotool) -> Entity Semester -> Handler [(Semester, Maybe (Route Autotool), Maybe (Route Autotool))]
autorisiertSemester mid semester = do
  let viewRoute = VorlesungenR $ entityKey semester
      editRoute = SemesterR $ entityKey semester
      keep allowed route = if allowed == Just True then Just route else Nothing
  darfSehen      <- istAutorisiert mid viewRoute
  darfBearbeiten <- istAutorisiert mid editRoute
  return $
    if darfSehen == Just True || darfBearbeiten == Just True
      then [ ( entityVal semester
             , keep darfSehen viewRoute
             , keep darfBearbeiten editRoute ) ]
      else []
-- | Bootstrap list-group modifier class for a semester's time status.
listGroupItemClass :: TimeStatus -> Text
listGroupItemClass Early   = "list-group-item-warning"
listGroupItemClass Late    = "list-group-item-danger"
listGroupItemClass Current = "list-group-item-success"
| marcellussiegburg/autotool | yesod/Handler/Semesters.hs | gpl-2.0 | 1,526 | 0 | 17 | 277 | 448 | 219 | 229 | -1 | -1 |
{-# LANGUAGE PatternGuards, CPP, ScopedTypeVariables #-}
{-
Copyright (C) 2010-2014 John MacFarlane <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-}
{- |
Module : Text.Pandoc.Writers.EPUB
Copyright : Copyright (C) 2010-2014 John MacFarlane
License : GNU GPL, version 2 or above
Maintainer : John MacFarlane <[email protected]>
Stability : alpha
Portability : portable
Conversion of 'Pandoc' documents to EPUB.
-}
module Text.Pandoc.Writers.EPUB ( writeEPUB ) where
import Data.IORef
import qualified Data.Map as M
import Data.Maybe ( fromMaybe )
import Data.List ( isInfixOf, intercalate )
import System.Environment ( getEnv )
import Text.Printf (printf)
import System.FilePath ( (</>), takeExtension, takeFileName )
import qualified Data.ByteString.Lazy as B
import qualified Data.ByteString.Lazy.Char8 as B8
import qualified Text.Pandoc.UTF8 as UTF8
import Text.Pandoc.SelfContained ( makeSelfContained )
import Codec.Archive.Zip
import Control.Applicative ((<$>))
import Data.Time.Clock.POSIX
import Data.Time
import System.Locale
import Text.Pandoc.Shared hiding ( Element )
import qualified Text.Pandoc.Shared as Shared
import Text.Pandoc.Builder (fromList, setMeta)
import Text.Pandoc.Options
import Text.Pandoc.Definition
import Text.Pandoc.Walk
import Control.Monad.State
import Text.XML.Light hiding (ppTopElement)
import Text.Pandoc.UUID
import Text.Pandoc.Writers.HTML
import Text.Pandoc.Writers.Markdown ( writePlain )
import Data.Char ( toLower, isDigit, isAlphaNum )
import Network.URI ( unEscapeString )
import Text.Pandoc.MIME (getMimeType)
import qualified Control.Exception as E
import Text.Blaze.Html.Renderer.Utf8 (renderHtml)
import Text.HTML.TagSoup
-- A Chapter includes a list of blocks and maybe a section
-- number offset.  Note, some chapters are unnumbered.  The section
-- number is different from the index number, which will be used
-- in filenames (e.g. chapter0003.xhtml).
data Chapter = Chapter (Maybe [Int]) [Block]
-- | Everything that ends up in the OPF metadata element.
-- List-valued fields may be empty; Maybe fields are optional scalars.
data EPUBMetadata = EPUBMetadata{
    epubIdentifier  :: [Identifier]
  , epubTitle       :: [Title]
  , epubDate        :: [Date]
  , epubLanguage    :: String
  , epubCreator     :: [Creator]
  , epubContributor :: [Creator]
  , epubSubject     :: [String]
  , epubDescription :: Maybe String
  , epubType        :: Maybe String
  , epubFormat      :: Maybe String
  , epubPublisher   :: Maybe String
  , epubSource      :: Maybe String
  , epubRelation    :: Maybe String
  , epubCoverage    :: Maybe String
  , epubRights      :: Maybe String
  , epubCoverImage  :: Maybe String
  , epubStylesheet  :: Maybe Stylesheet
  } deriving Show
-- | A user stylesheet, given either by path or by literal contents.
data Stylesheet = StylesheetPath FilePath
                | StylesheetContents String
                deriving Show
-- | A date plus the optional OPF @event@ attribute.
data Date = Date{
    dateText  :: String
  , dateEvent :: Maybe String
  } deriving Show
-- | A creator or contributor: display text plus optional role and
-- file-as (sorting) form.
data Creator = Creator{
    creatorText   :: String
  , creatorRole   :: Maybe String
  , creatorFileAs :: Maybe String
  } deriving Show
-- | A document identifier with an optional scheme attribute.
data Identifier = Identifier{
    identifierText   :: String
  , identifierScheme :: Maybe String
  } deriving Show
-- | A title with optional file-as (sorting) form and title type.
data Title = Title{
    titleText   :: String
  , titleFileAs :: Maybe String
  , titleType   :: Maybe String
  } deriving Show
-- | Qualified XML name carrying the @dc@ (Dublin Core) prefix.
dcName :: String -> QName
dcName n = QName n Nothing (Just "dc")
-- | Build an XML node under a @dc@-prefixed name.
dcNode :: Node t => String -> t -> Element
dcNode = node . dcName
-- | Qualified XML name carrying the @opf@ prefix.
opfName :: String -> QName
opfName n = QName n Nothing (Just "opf")
-- | Render inlines as plain text, with notes stripped (to avoid
-- doubled footnotes) and trailing whitespace trimmed.
plainify :: [Inline] -> String
plainify t =
  let doc = Pandoc nullMeta [Plain $ walk removeNote t]
  in trimr $ writePlain def{ writerStandalone = False } doc
-- | Replace a note by an empty string; leave any other inline alone.
removeNote :: Inline -> Inline
removeNote inline = case inline of
  Note _ -> Str ""
  _      -> inline
-- | Derive an XML id from a file path: take the file name and replace
-- every character outside alphanumerics, '-' and '_' (including the
-- extension dot) with an underscore.
toId :: FilePath -> String
toId = map sanitize . takeFileName
  where
    sanitize ch
      | isAlphaNum ch || ch `elem` "-_" = ch
      | otherwise                       = '_'
-- | Assemble EPUB metadata from writer options and the document
-- 'Meta', then fill required fields (identifier, language, date,
-- author) with defaults when they are absent.
getEPUBMetadata :: WriterOptions -> Meta -> IO EPUBMetadata
getEPUBMetadata opts meta = do
  let md = metadataFromMeta opts meta
  let elts = onlyElems $ parseXML $ writerEpubMetadata opts
  -- user-supplied EPUB metadata XML overrides/extends Meta-derived values
  let md' = foldr addMetadataFromXML md elts
  -- invent a random UUID identifier when none was given
  let addIdentifier m =
        if null (epubIdentifier m)
           then do
             randomId <- fmap show getRandomUUID
             return $ m{ epubIdentifier = [Identifier randomId Nothing] }
           else return m
  -- language: "lang" writer variable, else LANG from the environment
  -- (normalizing e.g. en_US.UTF-8 to en-US), else "en-US"
  let addLanguage m =
        if null (epubLanguage m)
           then case lookup "lang" (writerVariables opts) of
                     Just x  -> return m{ epubLanguage = x }
                     Nothing -> do
                       localeLang <- E.catch (liftM
                            (map (\c -> if c == '_' then '-' else c) .
                             takeWhile (/='.')) $ getEnv "LANG")
                            (\e -> let _ = (e :: E.SomeException) in return "en-US")
                       return m{ epubLanguage = localeLang }
           else return m
  -- default the date to "now" when none was given
  let fixDate m =
        if null (epubDate m)
           then do
             currentTime <- getCurrentTime
             return $ m{ epubDate = [ Date{
                           dateText = showDateTimeISO8601 currentTime
                         , dateEvent = Nothing } ] }
           else return m
  -- ensure at least one creator with role "aut", taken from docAuthors
  let addAuthor m =
        if any (\c -> creatorRole c == Just "aut") $ epubCreator m
           then return m
           else do
             let authors' = map plainify $ docAuthors meta
             let toAuthor name = Creator{ creatorText = name
                                        , creatorRole = Just "aut"
                                        , creatorFileAs = Nothing }
             return $ m{ epubCreator = map toAuthor authors' ++ epubCreator m }
  addIdentifier md' >>= fixDate >>= addAuthor >>= addLanguage
-- | Merge one element of user-supplied EPUB metadata XML into the
-- accumulated metadata.  Only Dublin Core ("dc") elements are
-- recognized; anything else is ignored.  List-valued fields
-- (identifier, title, date, creator, contributor, subject) are
-- prepended; scalar fields are overwritten.
addMetadataFromXML :: Element -> EPUBMetadata -> EPUBMetadata
addMetadataFromXML e@(Element (QName name _ (Just "dc")) attrs _ _) md
  | name == "identifier" = md{ epubIdentifier =
             Identifier{ identifierText = strContent e
                       , identifierScheme = lookupAttr (opfName "scheme") attrs
                       } : epubIdentifier md }
  | name == "title" = md{ epubTitle =
             Title{ titleText = strContent e
                  , titleFileAs = getAttr "file-as"
                  , titleType = getAttr "type"
                  } : epubTitle md }
  | name == "date" = md{ epubDate =
             Date{ dateText = fromMaybe "" $ normalizeDate' $ strContent e
                 , dateEvent = getAttr "event"
                 } : epubDate md }
  | name == "language" = md{ epubLanguage = strContent e }
  | name == "creator" = md{ epubCreator =
             Creator{ creatorText = strContent e
                    , creatorRole = getAttr "role"
                    , creatorFileAs = getAttr "file-as"
                    } : epubCreator md }
  | name == "contributor" = md{ epubContributor =
             Creator{ creatorText = strContent e
                    , creatorRole = getAttr "role"
                    , creatorFileAs = getAttr "file-as"
                    } : epubContributor md }
  | name == "subject" = md{ epubSubject = strContent e : epubSubject md }
  | name == "description" = md { epubDescription = Just $ strContent e }
  | name == "type" = md { epubType = Just $ strContent e }
  | name == "format" = md { epubFormat = Just $ strContent e }
  -- NOTE: a second, identical guard for "type" used to appear here;
  -- it was unreachable (the earlier guard always matched first) and
  -- has been removed.
  | name == "publisher" = md { epubPublisher = Just $ strContent e }
  | name == "source" = md { epubSource = Just $ strContent e }
  | name == "relation" = md { epubRelation = Just $ strContent e }
  | name == "coverage" = md { epubCoverage = Just $ strContent e }
  | name == "rights" = md { epubRights = Just $ strContent e }
  | otherwise = md
  where getAttr n = lookupAttr (opfName n) attrs
addMetadataFromXML _ md = md
-- | Flatten a metadata value to a plain string for use in OPF fields.
-- MetaMap and MetaList values fall through to the empty string.
metaValueToString :: MetaValue -> String
metaValueToString (MetaString s) = s
metaValueToString (MetaInlines ils) = plainify ils
metaValueToString (MetaBlocks bs) = plainify $ query (:[]) bs
metaValueToString (MetaBool b) = show b
metaValueToString _ = ""
-- | Fetch a metadata field as a list: the handler is mapped over a
-- MetaList's items, applied to any single value, and absent fields
-- give the empty list.
getList :: String -> Meta -> (MetaValue -> a) -> [a]
getList s meta handleMetaValue =
  case lookupMeta s meta of
    Nothing            -> []
    Just (MetaList xs) -> map handleMetaValue xs
    Just mv            -> [handleMetaValue mv]
-- | Identifiers from the @identifier@ metadata field; a map value
-- supplies text plus an optional scheme.
getIdentifier :: Meta -> [Identifier]
getIdentifier meta = getList "identifier" meta handleMetaValue
  where handleMetaValue (MetaMap m) =
           Identifier{ identifierText = maybe "" metaValueToString
                                        $ M.lookup "text" m
                     , identifierScheme = metaValueToString <$>
                                          M.lookup "scheme" m }
        handleMetaValue mv = Identifier (metaValueToString mv) Nothing
-- | Titles from the @title@ metadata field; a map value supplies text
-- plus optional file-as and type attributes.
getTitle :: Meta -> [Title]
getTitle meta = getList "title" meta handleMetaValue
  where handleMetaValue (MetaMap m) =
           Title{ titleText = maybe "" metaValueToString $ M.lookup "text" m
                , titleFileAs = metaValueToString <$> M.lookup "file-as" m
                , titleType = metaValueToString <$> M.lookup "type" m }
        handleMetaValue mv = Title (metaValueToString mv) Nothing Nothing
-- | Creators/contributors from metadata field @s@; a map value
-- supplies text plus optional file-as and role attributes.
getCreator :: String -> Meta -> [Creator]
getCreator s meta = getList s meta handleMetaValue
  where handleMetaValue (MetaMap m) =
           Creator{ creatorText = maybe "" metaValueToString $ M.lookup "text" m
                  , creatorFileAs = metaValueToString <$> M.lookup "file-as" m
                  , creatorRole = metaValueToString <$> M.lookup "role" m }
        handleMetaValue mv = Creator (metaValueToString mv) Nothing Nothing
-- | Dates from metadata field @s@.  Date text is normalized with
-- normalizeDate'; unparseable dates become the empty string.
-- (Idiom fix: 'fromMaybe ""' replaces the equivalent 'maybe "" id'.)
getDate :: String -> Meta -> [Date]
getDate s meta = getList s meta handleMetaValue
  where handleMetaValue (MetaMap m) =
           Date{ dateText = fromMaybe "" $
                   M.lookup "text" m >>= normalizeDate' . metaValueToString
               , dateEvent = metaValueToString <$> M.lookup "event" m }
        handleMetaValue mv =
           Date{ dateText = fromMaybe "" $ normalizeDate' $ metaValueToString mv
               , dateEvent = Nothing }
-- | A metadata field as a plain list of strings; absent fields give
-- the empty list.
simpleList :: String -> Meta -> [String]
simpleList s meta =
    maybe [] toStrings (lookupMeta s meta)
  where
    toStrings (MetaList xs) = map metaValueToString xs
    toStrings x             = [metaValueToString x]
-- | Build EPUB metadata purely from writer options and document meta.
-- Required fields left empty here are filled in later by
-- 'getEPUBMetadata'.
metadataFromMeta :: WriterOptions -> Meta -> EPUBMetadata
metadataFromMeta opts meta = EPUBMetadata{
      epubIdentifier = identifiers
    , epubTitle = titles
    , epubDate = date
    , epubLanguage = language
    , epubCreator = creators
    , epubContributor = contributors
    , epubSubject = subjects
    , epubDescription = description
    , epubType = epubtype
    , epubFormat = format
    , epubPublisher = publisher
    , epubSource = source
    , epubRelation = relation
    , epubCoverage = coverage
    , epubRights = rights
    , epubCoverImage = coverImage
    , epubStylesheet = stylesheet
    }
  where identifiers = getIdentifier meta
        titles = getTitle meta
        date = getDate "date" meta
        -- "language" wins over "lang" when both are present
        language = maybe "" metaValueToString $
           lookupMeta "language" meta `mplus` lookupMeta "lang" meta
        creators = getCreator "creator" meta
        contributors = getCreator "contributor" meta
        subjects = simpleList "subject" meta
        description = metaValueToString <$> lookupMeta "description" meta
        epubtype = metaValueToString <$> lookupMeta "type" meta
        format = metaValueToString <$> lookupMeta "format" meta
        publisher = metaValueToString <$> lookupMeta "publisher" meta
        source = metaValueToString <$> lookupMeta "source" meta
        relation = metaValueToString <$> lookupMeta "relation" meta
        coverage = metaValueToString <$> lookupMeta "coverage" meta
        rights = metaValueToString <$> lookupMeta "rights" meta
        -- the writer variable wins over the metadata field
        coverImage = lookup "epub-cover-image" (writerVariables opts) `mplus`
             (metaValueToString <$> lookupMeta "cover-image" meta)
        -- inline stylesheet from options wins over a path in metadata
        stylesheet = (StylesheetContents <$> writerEpubStylesheet opts) `mplus`
                     ((StylesheetPath . metaValueToString) <$>
                       lookupMeta "stylesheet" meta)
-- | Produce an EPUB file from a Pandoc document.
writeEPUB :: WriterOptions  -- ^ Writer options
          -> Pandoc         -- ^ Document to convert
          -> IO B.ByteString
writeEPUB opts doc@(Pandoc meta _) = do
  let version = fromMaybe EPUB2 (writerEpubVersion opts)
  let epub3 = version == EPUB3
  -- single timestamp reused for every zip entry
  epochtime <- floor `fmap` getPOSIXTime
  let mkEntry path content = toEntry path epochtime content
  let vars = ("epub3", if epub3 then "true" else "false")
           : ("css", "stylesheet.css")
           : writerVariables opts
  let opts' = opts{ writerEmailObfuscation = NoObfuscation
                  , writerStandalone = True
                  , writerSectionDivs = True
                  , writerHtml5 = epub3
                  , writerTableOfContents = False -- we always have one in epub
                  , writerVariables = vars
                  , writerHTMLMathMethod =
                      if epub3
                         then MathML Nothing
                         else writerHTMLMathMethod opts
                  , writerWrapText = False }
  metadata <- getEPUBMetadata opts' meta
  -- cover page
  (cpgEntry, cpicEntry) <-
       case epubCoverImage metadata of
            Nothing  -> return ([],[])
            Just img -> do
              let coverImage = "media/" ++ takeFileName img
              let cpContent = renderHtml $ writeHtml opts'
                     (Pandoc meta [RawBlock (Format "html") $ "<div id=\"cover-image\">\n<img src=\"" ++ coverImage ++ "\" alt=\"cover image\" />\n</div>"])
              imgContent <- B.readFile img
              return ( [mkEntry "cover.xhtml" cpContent]
                     , [mkEntry coverImage imgContent] )
  -- title page
  let tpContent = renderHtml $ writeHtml opts'{
                      writerVariables = ("titlepage","true"):vars }
                  (Pandoc meta [])
  let tpEntry = mkEntry "title_page.xhtml" tpContent
  -- handle pictures: transformInline/transformBlock (defined elsewhere
  -- in this module) presumably rewrite media references and record
  -- (old, new) path pairs in mediaRef -- confirm there
  mediaRef <- newIORef []
  Pandoc _ blocks <- walkM (transformInline opts' mediaRef) doc >>=
                     walkM (transformBlock opts' mediaRef)
  pics <- readIORef mediaRef
  let readPicEntry entries (oldsrc, newsrc) = do
        res <- fetchItem (writerSourceURL opts') oldsrc
        case res of
             -- missing media is a warning, not a fatal error
             Left _ -> do
               warn $ "Could not find media `" ++ oldsrc ++ "', skipping..."
               return entries
             Right (img,_) -> return $
               (toEntry newsrc epochtime $ B.fromChunks . (:[]) $ img) : entries
  picEntries <- foldM readPicEntry [] pics
  -- handle fonts
  let mkFontEntry f = mkEntry (takeFileName f) `fmap` B.readFile f
  fontEntries <- mapM mkFontEntry $ writerEpubFonts opts'
  -- body pages
  -- add level 1 header to beginning if none there
  let blocks' = addIdentifiers
              $ case blocks of
                     (Header 1 _ _ : _) -> blocks
                     _ -> Header 1 ("",["unnumbered"],[])
                             (docTitle meta) : blocks
  let chapterHeaderLevel = writerEpubChapterLevel opts
  -- internal reference IDs change when we chunk the file,
  -- so that '#my-header-1' might turn into 'chap004.xhtml#my-header'.
  -- the next two lines fix that:
  let reftable = correlateRefs chapterHeaderLevel blocks'
  let blocks'' = replaceRefs reftable blocks'
  let isChapterHeader (Header n _ _) = n <= chapterHeaderLevel
      isChapterHeader _ = False
  -- split the block list into chapters at chapter-level headers,
  -- threading section-number state through a State monad
  let toChapters :: [Block] -> State [Int] [Chapter]
      toChapters [] = return []
      toChapters (Header n attr@(_,classes,_) ils : bs) = do
        nums <- get
        mbnum <- if "unnumbered" `elem` classes
                    then return Nothing
                    else case splitAt (n - 1) nums of
                              (ks, (m:_)) -> do
                                let nums' = ks ++ [m+1]
                                put nums'
                                return $ Just (ks ++ [m])
                                -- note, this is the offset not the sec number
                              (ks, []) -> do
                                let nums' = ks ++ [1]
                                put nums'
                                return $ Just ks
        let (xs,ys) = break isChapterHeader bs
        (Chapter mbnum (Header n attr ils : xs) :) `fmap` toChapters ys
      toChapters (b:bs) = do
        let (xs,ys) = break isChapterHeader bs
        (Chapter Nothing (b:xs) :) `fmap` toChapters ys
  let chapters = evalState (toChapters blocks'') []
  let chapToEntry :: Int -> Chapter -> Entry
      chapToEntry num (Chapter mbnum bs) = mkEntry (showChapter num)
        $ renderHtml
        $ writeHtml opts'{ writerNumberOffset = fromMaybe [] mbnum }
        $ case bs of
              (Header _ _ xs : _) ->
                -- remove notes or we get doubled footnotes
                Pandoc (setMeta "title" (walk removeNote $ fromList xs)
                        nullMeta) bs
              _ ->
                Pandoc nullMeta bs
  let chapterEntries = zipWith chapToEntry [1..] chapters
  -- incredibly inefficient (TODO):
  let containsMathML ent = epub3 &&
        "<math" `isInfixOf` (B8.unpack $ fromEntry ent)
  let containsSVG ent = epub3 &&
        "<svg" `isInfixOf` (B8.unpack $ fromEntry ent)
  let props ent = ["mathml" | containsMathML ent] ++ ["svg" | containsSVG ent]
  -- contents.opf
  let chapterNode ent = unode "item" !
                           ([("id", toId $ eRelativePath ent),
                             ("href", eRelativePath ent),
                             ("media-type", "application/xhtml+xml")]
                            ++ case props ent of
                                    [] -> []
                                    xs -> [("properties", unwords xs)])
                        $ ()
  let chapterRefNode ent = unode "itemref" !
                             [("idref", toId $ eRelativePath ent)] $ ()
  let pictureNode ent = unode "item" !
                           [("id", toId $ eRelativePath ent),
                            ("href", eRelativePath ent),
                            ("media-type", fromMaybe "application/octet-stream"
                               $ mediaTypeOf $ eRelativePath ent)] $ ()
  let fontNode ent = unode "item" !
                        [("id", toId $ eRelativePath ent),
                         ("href", eRelativePath ent),
                         ("media-type", fromMaybe "" $ getMimeType $ eRelativePath ent)] $ ()
  let plainTitle = case docTitle meta of
                        [] -> case epubTitle metadata of
                                   []    -> "UNTITLED"
                                   (x:_) -> titleText x
                        x  -> plainify x
  let uuid = case epubIdentifier metadata of
                  (x:_) -> identifierText x -- use first identifier as UUID
                  []    -> error "epubIdentifier is null" -- shouldn't happen
  currentTime <- getCurrentTime
  let contentsData = UTF8.fromStringLazy $ ppTopElement $
        unode "package" ! [("version", case version of
                                            EPUB2 -> "2.0"
                                            EPUB3 -> "3.0")
                          ,("xmlns","http://www.idpf.org/2007/opf")
                          ,("unique-identifier","epub-id-1")] $
          [ metadataElement version metadata currentTime
          , unode "manifest" $
             [ unode "item" ! [("id","ncx"), ("href","toc.ncx")
                              ,("media-type","application/x-dtbncx+xml")] $ ()
             , unode "item" ! [("id","style"), ("href","stylesheet.css")
                              ,("media-type","text/css")] $ ()
             , unode "item" ! ([("id","nav")
                               ,("href","nav.xhtml")
                               ,("media-type","application/xhtml+xml")] ++
                               [("properties","nav") | epub3 ]) $ ()
             ] ++
             map chapterNode (cpgEntry ++ (tpEntry : chapterEntries)) ++
             (case cpicEntry of
                   []    -> []
                   (x:_) -> [add_attrs
                             [Attr (unqual "properties") "cover-image" | epub3]
                             (pictureNode x)]) ++
             map pictureNode picEntries ++
             map fontNode fontEntries
          , unode "spine" ! [("toc","ncx")] $
              case epubCoverImage metadata of
                    Nothing -> []
                    Just _ -> [ unode "itemref" !
                                [("idref", "cover_xhtml"),("linear","no")] $ () ]
              ++ ((unode "itemref" ! [("idref", "title_page_xhtml")
                                     ,("linear", if null (docTitle meta)
                                                    then "no"
                                                    else "yes")] $ ()) :
                  (unode "itemref" ! [("idref", "nav")
                                     ,("linear", if writerTableOfContents opts
                                                    then "yes"
                                                    else "no")] $ ()) :
                  map chapterRefNode chapterEntries)
          , unode "guide" $
             [ unode "reference" !
                 [("type","toc"),("title",plainTitle),
                  ("href","nav.xhtml")] $ ()
             ] ++
             [ unode "reference" !
                 [("type","cover"),("title","Cover"),("href","cover.xhtml")] $ () | epubCoverImage metadata /= Nothing
             ]
          ]
  let contentsEntry = mkEntry "content.opf" contentsData
  -- toc.ncx
  let secs = hierarchicalize blocks''
  let tocLevel = writerTOCDepth opts
  -- shared TOC-tree walker, parameterized by the node formatter so it
  -- serves both the NCX navMap and the XHTML nav list
  let navPointNode :: (Int -> String -> String -> [Element] -> Element)
                   -> Shared.Element -> State Int Element
      navPointNode formatter (Sec _ nums (ident,_,_) ils children) = do
        n <- get
        modify (+1)
        let showNums :: [Int] -> String
            showNums = intercalate "." . map show
        let tit' = plainify ils
        let tit = if writerNumberSections opts && not (null nums)
                     then showNums nums ++ " " ++ tit'
                     else tit'
        let src = case lookup ident reftable of
                       Just x  -> x
                       Nothing -> error (ident ++ " not found in reftable")
        let isSec (Sec lev _ _ _ _) = lev <= tocLevel
            isSec _ = False
        let subsecs = filter isSec children
        subs <- mapM (navPointNode formatter) subsecs
        return $ formatter n tit src subs
      navPointNode _ (Blk _) = error "navPointNode encountered Blk"
  let navMapFormatter :: Int -> String -> String -> [Element] -> Element
      navMapFormatter n tit src subs = unode "navPoint" !
               [("id", "navPoint-" ++ show n)
               ,("playOrder", show n)] $
                  [ unode "navLabel" $ unode "text" tit
                  , unode "content" ! [("src", src)] $ ()
                  ] ++ subs
  let tpNode = unode "navPoint" ! [("id", "navPoint-0")] $
                  [ unode "navLabel" $ unode "text" (plainify $ docTitle meta)
                  , unode "content" ! [("src","title_page.xhtml")] $ () ]
  let tocData = UTF8.fromStringLazy $ ppTopElement $
        unode "ncx" ! [("version","2005-1")
                      ,("xmlns","http://www.daisy.org/z3986/2005/ncx/")] $
          [ unode "head" $
             [ unode "meta" ! [("name","dtb:uid")
                              ,("content", uuid)] $ ()
             , unode "meta" ! [("name","dtb:depth")
                              ,("content", "1")] $ ()
             , unode "meta" ! [("name","dtb:totalPageCount")
                              ,("content", "0")] $ ()
             , unode "meta" ! [("name","dtb:maxPageNumber")
                              ,("content", "0")] $ ()
             ] ++ case epubCoverImage metadata of
                       Nothing  -> []
                       Just img -> [unode "meta" ! [("name","cover"),
                                       ("content", toId img)] $ ()]
          , unode "docTitle" $ unode "text" $ plainTitle
          , unode "navMap" $
              tpNode : evalState (mapM (navPointNode navMapFormatter) secs) 1
          ]
  let tocEntry = mkEntry "toc.ncx" tocData
  let navXhtmlFormatter :: Int -> String -> String -> [Element] -> Element
      navXhtmlFormatter n tit src subs = unode "li" !
               [("id", "toc-li-" ++ show n)] $
                  (unode "a" ! [("href",src)]
                   $ (unode "span" tit))
                  : case subs of
                         []    -> []
                         (_:_) -> [unode "ol" ! [("class","toc")] $ subs]
  let navtag = if epub3 then "nav" else "div"
  let navData = UTF8.fromStringLazy $ ppTopElement $
        unode "html" ! [("xmlns","http://www.w3.org/1999/xhtml")
                       ,("xmlns:epub","http://www.idpf.org/2007/ops")] $
          [ unode "head" $
             [ unode "title" plainTitle
             , unode "link" ! [("rel","stylesheet"),("type","text/css"),("href","stylesheet.css")] $ () ]
          , unode "body" $
              unode navtag ! [("epub:type","toc") | epub3] $
                [ unode "h1" ! [("id","toc-title")] $ plainTitle
                , unode "ol" ! [("class","toc")] $ evalState (mapM (navPointNode navXhtmlFormatter) secs) 1]
          ]
  let navEntry = mkEntry "nav.xhtml" navData
  -- mimetype
  let mimetypeEntry = mkEntry "mimetype" $ UTF8.fromStringLazy "application/epub+zip"
  -- container.xml
  let containerData = UTF8.fromStringLazy $ ppTopElement $
        unode "container" ! [("version","1.0")
                            ,("xmlns","urn:oasis:names:tc:opendocument:xmlns:container")] $
          unode "rootfiles" $
            unode "rootfile" ! [("full-path","content.opf")
                               ,("media-type","application/oebps-package+xml")] $ ()
  let containerEntry = mkEntry "META-INF/container.xml" containerData
  -- com.apple.ibooks.display-options.xml
  let apple = UTF8.fromStringLazy $ ppTopElement $
        unode "display_options" $
          unode "platform" ! [("name","*")] $
            unode "option" ! [("name","specified-fonts")] $ "true"
  let appleEntry = mkEntry "META-INF/com.apple.ibooks.display-options.xml" apple
  -- stylesheet
  stylesheet <- case epubStylesheet metadata of
                     Just (StylesheetPath fp)    -> UTF8.readFile fp
                     Just (StylesheetContents s) -> return s
                     Nothing -> UTF8.toString `fmap`
                                readDataFile (writerUserDataDir opts) "epub.css"
  let stylesheetEntry = mkEntry "stylesheet.css" $ UTF8.fromStringLazy stylesheet
  -- construct archive
  let archive = foldr addEntryToArchive emptyArchive
                 (mimetypeEntry : containerEntry : appleEntry : stylesheetEntry : tpEntry :
                  contentsEntry : tocEntry : navEntry :
                  (picEntries ++ cpicEntry ++ cpgEntry ++ chapterEntries ++ fontEntries))
  return $ fromArchive archive
-- | Build the OPF @<metadata>@ element from the collected EPUB metadata.
-- Most fields map to Dublin Core elements.  The EPUB2 and EPUB3 branches
-- differ in how refinements are expressed: EPUB2 attaches @opf:@
-- attributes directly, while EPUB3 emits separate @<meta refines=...>@
-- elements.
metadataElement :: EPUBVersion -> EPUBMetadata -> UTCTime -> Element
metadataElement version md currentTime =
  unode "metadata" ! [("xmlns:dc","http://purl.org/dc/elements/1.1/")
                     ,("xmlns:opf","http://www.idpf.org/2007/opf")] $ mdNodes
  where mdNodes = identifierNodes ++ titleNodes ++ dateNodes ++ languageNodes
                  ++ creatorNodes ++ contributorNodes ++ subjectNodes
                  ++ descriptionNodes ++ typeNodes ++ formatNodes
                  ++ publisherNodes ++ sourceNodes ++ relationNodes
                  ++ coverageNodes ++ rightsNodes ++ coverImageNodes
                  ++ modifiedNodes
        -- Number generated ids: "base-1", "base-2", ... so refinements
        -- can point back at them via @refines="#base-n"@.
        withIds base f = concat . zipWith f (map (\x -> base ++ ('-' : show x))
                         ([1..] :: [Int]))
        identifierNodes = withIds "epub-id" toIdentifierNode $
                          epubIdentifier md
        titleNodes = withIds "epub-title" toTitleNode $ epubTitle md
        dateNodes = if version == EPUB2
                       then withIds "epub-date" toDateNode $ epubDate md
                       else -- epub3 allows only one dc:date
                            -- http://www.idpf.org/epub/30/spec/epub30-publications.html#sec-opf-dcdate
                            case epubDate md of
                                 [] -> []
                                 (x:_) -> [dcNode "date" ! [("id","epub-date")]
                                            $ dateText x]
        languageNodes = [dcTag "language" $ epubLanguage md]
        creatorNodes = withIds "epub-creator" (toCreatorNode "creator") $
                       epubCreator md
        contributorNodes = withIds "epub-contributor"
                           (toCreatorNode "contributor") $ epubContributor md
        subjectNodes = map (dcTag "subject") $ epubSubject md
        -- The following are all optional single-valued fields: emit one
        -- dc: element if present, nothing otherwise.
        descriptionNodes = maybe [] (dcTag' "description") $ epubDescription md
        typeNodes = maybe [] (dcTag' "type") $ epubType md
        formatNodes = maybe [] (dcTag' "format") $ epubFormat md
        publisherNodes = maybe [] (dcTag' "publisher") $ epubPublisher md
        sourceNodes = maybe [] (dcTag' "source") $ epubSource md
        relationNodes = maybe [] (dcTag' "relation") $ epubRelation md
        coverageNodes = maybe [] (dcTag' "coverage") $ epubCoverage md
        rightsNodes = maybe [] (dcTag' "rights") $ epubRights md
        -- Legacy <meta name="cover"> pointer to the cover image id.
        coverImageNodes = maybe []
            (\img -> [unode "meta" !  [("name","cover"),
                          ("content",toId img)] $ ()])
            $ epubCoverImage md
        -- Last-modified timestamp; emitted only for EPUB3.
        modifiedNodes = [ unode "meta" ! [("property", "dcterms:modified")] $
               (showDateTimeISO8601 currentTime) | version == EPUB3 ]
        -- dcTag builds a bare Dublin Core element; dcTag' wraps it in a
        -- singleton list for use with 'maybe'.
        dcTag n s = unode ("dc:" ++ n) s
        dcTag' n s = [dcTag n s]
        -- EPUB2: scheme as an opf:scheme attribute.
        -- EPUB3: scheme refined via ONIX codelist 5 in a separate meta.
        toIdentifierNode id' (Identifier txt scheme)
          | version == EPUB2 = [dcNode "identifier" !
              ([("id",id')] ++ maybe [] (\x -> [("opf:scheme", x)]) scheme) $
              txt]
          | otherwise = [dcNode "identifier" ! [("id",id')] $ txt] ++
              maybe [] (\x -> [unode "meta" !
                   [("refines",'#':id'),("property","identifier-type"),
                    ("scheme","onix:codelist5")] $ x])
                 (schemeToOnix `fmap` scheme)
        -- Shared for dc:creator and dc:contributor; attaches file-as and
        -- (MARC) role either as opf: attributes (EPUB2) or refinements.
        toCreatorNode s id' creator
          | version == EPUB2 = [dcNode s !
             (("id",id') :
              maybe [] (\x -> [("opf:file-as",x)]) (creatorFileAs creator) ++
              maybe [] (\x -> [("opf:role",x)])
               (creatorRole creator >>= toRelator)) $ creatorText creator]
          | otherwise = [dcNode s ! [("id",id')] $ creatorText creator] ++
              maybe [] (\x -> [unode "meta" !
                   [("refines",'#':id'),("property","file-as")] $ x])
                   (creatorFileAs creator) ++
              maybe [] (\x -> [unode "meta" !
                   [("refines",'#':id'),("property","role"),
                     ("scheme","marc:relators")] $ x])
                   (creatorRole creator >>= toRelator)
        toTitleNode id' title
          | version == EPUB2 = [dcNode "title" !
             (("id",id') :
              maybe [] (\x -> [("opf:file-as",x)]) (titleFileAs title) ++
              maybe [] (\x -> [("opf:title-type",x)]) (titleType title)) $
              titleText title]
          | otherwise = [dcNode "title" ! [("id",id')] $ titleText title]
              ++
              maybe [] (\x -> [unode "meta" !
                   [("refines",'#':id'),("property","file-as")] $ x])
                   (titleFileAs title) ++
              maybe [] (\x -> [unode "meta" !
                   [("refines",'#':id'),("property","title-type")] $ x])
                   (titleType title)
        -- Used only in the EPUB2 branch of dateNodes (see above).
        toDateNode id' date = [dcNode "date" !
             (("id",id') :
               maybe [] (\x -> [("opf:event",x)]) (dateEvent date)) $
                dateText date]
        -- ONIX codelist 5 identifier-type codes; "01" = proprietary/unknown.
        schemeToOnix "ISBN-10"              = "02"
        schemeToOnix "GTIN-13"              = "03"
        schemeToOnix "UPC"                  = "04"
        schemeToOnix "ISMN-10"              = "05"
        schemeToOnix "DOI"                  = "06"
        schemeToOnix "LCCN"                 = "13"
        schemeToOnix "GTIN-14"              = "14"
        schemeToOnix "ISBN-13"              = "15"
        schemeToOnix "Legal deposit number" = "17"
        schemeToOnix "URN"                  = "22"
        schemeToOnix "OCLC"                 = "23"
        schemeToOnix "ISMN-13"              = "25"
        schemeToOnix "ISBN-A"               = "26"
        schemeToOnix "JP"                   = "27"
        schemeToOnix "OLCC"                 = "28"
        schemeToOnix _                      = "01"
-- | Render a UTC timestamp in the ISO-8601 form used by EPUB metadata,
-- e.g. @"2013-01-01T12:00:00Z"@.
showDateTimeISO8601 :: UTCTime -> String
showDateTimeISO8601 utc = formatTime defaultTimeLocale "%FT%TZ" utc
-- | Rewrite the @src@ and @poster@ attributes of media tags
-- (@video@, @source@, @img@) so they point into the EPUB's @media/@
-- directory, registering each referenced file in 'mediaRef'.
-- All other tags pass through unchanged.
transformTag :: WriterOptions
             -> IORef [(FilePath, FilePath)] -- ^ (oldpath, newpath) media
             -> Tag String
             -> IO (Tag String)
transformTag opts mediaRef tag@(TagOpen name attr)
  | name == "video" || name == "source" || name == "img" = do
  let src = fromAttrib "src" tag
  let poster = fromAttrib "poster" tag
  -- Resolve relative to the writer's source URL/directory when set.
  let oldsrc = maybe src (</> src) $ writerSourceURL opts
  let oldposter = maybe poster (</> poster) $ writerSourceURL opts
  newsrc <- modifyMediaRef mediaRef oldsrc
  newposter <- modifyMediaRef mediaRef oldposter
  -- Drop the old attributes; re-add only the ones that are non-empty
  -- (modifyMediaRef maps "" to "").
  let attr' = filter (\(x,_) -> x /= "src" && x /= "poster") attr ++
              [("src", newsrc) | not (null newsrc)] ++
              [("poster", newposter) | not (null newposter)]
  return $ TagOpen name attr'
transformTag _ _ tag = return tag
-- | Map a source path to a stable name inside the EPUB's @media/@
-- directory, remembering the association in the given 'IORef'.
-- The empty path is passed through unchanged, and a path that was seen
-- before reuses the name assigned to it the first time.
modifyMediaRef :: IORef [(FilePath, FilePath)] -> FilePath -> IO FilePath
modifyMediaRef _ "" = return ""
modifyMediaRef mediaRef oldsrc = do
  seen <- readIORef mediaRef
  case lookup oldsrc seen of
    Just existing -> return existing
    Nothing       -> do
      -- Fresh names are numbered by how many files we have so far,
      -- keeping the original extension so readers can sniff the type.
      let fresh = "media/file" ++ show (length seen) ++ takeExtension oldsrc
      modifyIORef mediaRef ((oldsrc, fresh) :)
      return fresh
-- | Rewrite media references inside raw HTML blocks (via
-- 'transformTag'); every other kind of block is returned untouched.
transformBlock :: WriterOptions
               -> IORef [(FilePath, FilePath)] -- ^ (oldpath, newpath) media
               -> Block
               -> IO Block
transformBlock opts mediaRef (RawBlock fmt raw)
  | fmt == Format "html" =
      RawBlock fmt . renderTags <$> mapM (transformTag opts mediaRef) (parseTags raw)
transformBlock _ _ blk = return blk
-- | Rewrite inline elements for EPUB output:
--   * Images: register the source file in 'mediaRef' and point the
--     image at its new @media/@ path.
--   * Math (only when the WebTeX HTML math method is selected):
--     render to HTML, inline the remote image data via
--     'makeSelfContained', and emit the result as raw HTML.
-- Everything else passes through unchanged.
transformInline :: WriterOptions
                -> IORef [(FilePath, FilePath)] -- ^ (oldpath, newpath) media
                -> Inline
                -> IO Inline
transformInline opts mediaRef (Image lab (src,tit)) = do
  let src' = unEscapeString src
  -- NOTE(review): when a source URL is set, the *escaped* src is joined,
  -- while the unescaped src' is used otherwise — presumably intentional
  -- for remote URLs; confirm before changing.
  let oldsrc = maybe src' (</> src) $ writerSourceURL opts
  newsrc <- modifyMediaRef mediaRef oldsrc
  return $ Image lab (newsrc, tit)
transformInline opts _ (x@(Math _ _))
  | WebTeX _ <- writerHTMLMathMethod opts = do
  raw <- makeSelfContained Nothing $ writeHtmlInline opts x
  return $ RawInline (Format "html") raw
transformInline _ _ x = return x
-- | Render a single inline element as an HTML fragment (no standalone
-- document wrapper), with trailing whitespace trimmed.
writeHtmlInline :: WriterOptions -> Inline -> String
writeHtmlInline opts il =
  trimr (writeHtmlString opts{ writerStandalone = False }
           (Pandoc nullMeta [Plain [il]]))
-- | Attach XML attributes to a node constructor, so one can write
-- @unode "a" ! [("href", url)] $ child@.
(!) :: Node t => (t -> Element) -> [(String, String)] -> t -> Element
(!) f attrs n = add_attrs [Attr (unqual k) v | (k, v) <- attrs] (f n)
-- | Version of 'ppTopElement' that specifies UTF-8 encoding.
ppTopElement :: Element -> String
ppTopElement = ("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ++) . unEntity . ppElement
  -- unEntity removes numeric entities introduced by ppElement
  -- (kindlegen seems to choke on these).
  -- A reference "&#NNN;" is decoded by constructing the Haskell char
  -- literal '\NNN' as a string and parsing it with safeRead; if parsing
  -- fails the "&#" is left in place and scanning continues.
  where unEntity [] = ""
        unEntity ('&':'#':xs) =
                 let (ds,ys) = break (==';') xs
                     rest = drop 1 ys
                 in  case safeRead ('\'':'\\':ds ++ "'") of
                       Just x  -> x : unEntity rest
                       Nothing -> '&':'#':unEntity xs
        unEntity (x:xs) = x : unEntity xs
-- | MIME type for the given file, restricted to the media families an
-- EPUB can embed (@image/*@, @video/*@, @audio/*@); any other MIME
-- type maps to 'Nothing'.
mediaTypeOf :: FilePath -> Maybe String
mediaTypeOf fp =
  case getMimeType fp of
    -- All three interesting top-level types happen to be 5 chars long.
    Just mime | take 5 mime `elem` ["image", "video", "audio"] -> Just mime
    _ -> Nothing
-- | State threaded through 'correlateRefs': the chapter counter and
-- the accumulated identifier -> EPUB-URL table (most recent first).
data IdentState = IdentState{
       chapterNumber :: Int,
       identTable :: [(String,String)]
     } deriving (Read, Show)
-- | File name used for chapter number @n@,
-- e.g. @showChapter 6 == "ch006.xhtml"@.
showChapter :: Int -> String
showChapter num = printf "ch%03d.xhtml" num
-- Add identifiers to any headers without them, keeping every
-- identifier handed out so far so generated ones stay unique.
addIdentifiers :: [Block] -> [Block]
addIdentifiers bs = evalState (mapM go bs) []
  where
    go (Header lvl (ident, classes, kvs) ils) = do
      used <- get
      let ident'
            | null ident = uniqueIdent ils used
            | otherwise  = ident
      put (ident' : used)
      return (Header lvl (ident', classes, kvs) ils)
    go blk = return blk
-- Go through a block list and construct a table
-- correlating the automatically constructed references
-- that would be used in a normal pandoc document with
-- new URLs to be used in the EPUB. For example, what
-- was "header-1" might turn into "ch006.xhtml#header".
correlateRefs :: Int -> [Block] -> [(String,String)]
correlateRefs chapterHeaderLevel bs =
  identTable $ execState (mapM_ go bs)
                 IdentState{ chapterNumber = 0
                           , identTable = [] }
 where go :: Block -> State IdentState ()
       -- Headers at or above the chapter level start a new chapter file
       -- and map to the bare file name; deeper headers map to a
       -- fragment (#ident) inside the current chapter's file.
       go (Header n (ident,_,_) _) = do
          when (n <= chapterHeaderLevel) $
             modify $ \s -> s{ chapterNumber = chapterNumber s + 1 }
          st <- get
          let chapterid = showChapter (chapterNumber st) ++
                          if n <= chapterHeaderLevel
                             then ""
                             else '#' : ident
          -- Prepend, so for duplicate idents a later lookup sees the
          -- most recently processed header first.
          modify $ \s -> s{ identTable = (ident, chapterid) : identTable st }
       go _ = return ()
-- Replace internal link references using the table produced
-- by correlateRefs; links whose target is not in the table are
-- left alone.
replaceRefs :: [(String,String)] -> [Block] -> [Block]
replaceRefs refTable = walk replaceOneRef
  where replaceOneRef link@(Link lab ('#':ident, tit)) =
          maybe link (\url -> Link lab (url, tit)) (lookup ident refTable)
        replaceOneRef inline = inline
-- Variant of normalizeDate that also accepts the partial dates
-- YYYY and YYYY-MM, passing anything else on to normalizeDate.
normalizeDate' :: String -> Maybe String
normalizeDate' raw =
  case trim raw of
    d@[y1,y2,y3,y4]
      | all isDigit [y1,y2,y3,y4]         -> Just d   -- YYYY
    d@[y1,y2,y3,y4,'-',m1,m2]
      | all isDigit [y1,y2,y3,y4,m1,m2]   -> Just d   -- YYYY-MM
    d -> normalizeDate d
-- | Translate a MARC relator code or human-readable relator name into
-- a relator code: exact codes pass through, otherwise the name is
-- looked up case-insensitively in 'relatorMap'.
toRelator :: String -> Maybe String
toRelator x =
  if x `elem` relators
     then Just x
     else lookup (map toLower x) relatorMap
-- | All known three-letter MARC relator codes.
relators :: [String]
relators = [ code | (_, code) <- relatorMap ]
-- | MARC relator terms paired with their three-letter codes, from the
-- Library of Congress "MARC Code List for Relators".  Names are
-- lower-case so 'toRelator' can match case-insensitively.
relatorMap :: [(String, String)]
relatorMap =
  [("abridger", "abr")
  ,("actor", "act")
  ,("adapter", "adp")
  ,("addressee", "rcp")
  ,("analyst", "anl")
  ,("animator", "anm")
  ,("annotator", "ann")
  ,("appellant", "apl")
  ,("appellee", "ape")
  ,("applicant", "app")
  ,("architect", "arc")
  ,("arranger", "arr")
  ,("art copyist", "acp")
  ,("art director", "adi")
  ,("artist", "art")
  ,("artistic director", "ard")
  ,("assignee", "asg")
  ,("associated name", "asn")
  ,("attributed name", "att")
  ,("auctioneer", "auc")
  ,("author", "aut")
  ,("author in quotations or text abstracts", "aqt")
  ,("author of afterword, colophon, etc.", "aft")
  ,("author of dialog", "aud")
  ,("author of introduction, etc.", "aui")
  ,("autographer", "ato")
  ,("bibliographic antecedent", "ant")
  ,("binder", "bnd")
  ,("binding designer", "bdd")
  ,("blurb writer", "blw")
  ,("book designer", "bkd")
  ,("book producer", "bkp")
  ,("bookjacket designer", "bjd")
  ,("bookplate designer", "bpd")
  ,("bookseller", "bsl")
  ,("braille embosser", "brl")
  ,("broadcaster", "brd")
  ,("calligrapher", "cll")
  ,("cartographer", "ctg")
  ,("caster", "cas")
  ,("censor", "cns")
  ,("choreographer", "chr")
  ,("cinematographer", "cng")
  ,("client", "cli")
  ,("collection registrar", "cor")
  ,("collector", "col")
  ,("collotyper", "clt")
  ,("colorist", "clr")
  ,("commentator", "cmm")
  ,("commentator for written text", "cwt")
  ,("compiler", "com")
  ,("complainant", "cpl")
  ,("complainant-appellant", "cpt")
  ,("complainant-appellee", "cpe")
  ,("composer", "cmp")
  ,("compositor", "cmt")
  ,("conceptor", "ccp")
  ,("conductor", "cnd")
  ,("conservator", "con")
  ,("consultant", "csl")
  ,("consultant to a project", "csp")
  ,("contestant", "cos")
  ,("contestant-appellant", "cot")
  ,("contestant-appellee", "coe")
  ,("contestee", "cts")
  ,("contestee-appellant", "ctt")
  ,("contestee-appellee", "cte")
  ,("contractor", "ctr")
  ,("contributor", "ctb")
  ,("copyright claimant", "cpc")
  ,("copyright holder", "cph")
  ,("corrector", "crr")
  ,("correspondent", "crp")
  ,("costume designer", "cst")
  ,("court governed", "cou")
  ,("court reporter", "crt")
  ,("cover designer", "cov")
  ,("creator", "cre")
  ,("curator", "cur")
  ,("dancer", "dnc")
  ,("data contributor", "dtc")
  ,("data manager", "dtm")
  ,("dedicatee", "dte")
  ,("dedicator", "dto")
  ,("defendant", "dfd")
  ,("defendant-appellant", "dft")
  ,("defendant-appellee", "dfe")
  ,("degree granting institution", "dgg")
  ,("delineator", "dln")
  ,("depicted", "dpc")
  ,("depositor", "dpt")
  ,("designer", "dsr")
  ,("director", "drt")
  ,("dissertant", "dis")
  ,("distribution place", "dbp")
  ,("distributor", "dst")
  ,("donor", "dnr")
  ,("draftsman", "drm")
  ,("dubious author", "dub")
  ,("editor", "edt")
  ,("editor of compilation", "edc")
  ,("editor of moving image work", "edm")
  ,("electrician", "elg")
  ,("electrotyper", "elt")
  ,("enacting jurisdiction", "enj")
  ,("engineer", "eng")
  ,("engraver", "egr")
  ,("etcher", "etr")
  ,("event place", "evp")
  ,("expert", "exp")
  ,("facsimilist", "fac")
  ,("field director", "fld")
  ,("film director", "fmd")
  ,("film distributor", "fds")
  ,("film editor", "flm")
  ,("film producer", "fmp")
  ,("filmmaker", "fmk")
  ,("first party", "fpy")
  ,("forger", "frg")
  ,("former owner", "fmo")
  ,("funder", "fnd")
  ,("geographic information specialist", "gis")
  ,("honoree", "hnr")
  ,("host", "hst")
  ,("host institution", "his")
  ,("illuminator", "ilu")
  ,("illustrator", "ill")
  ,("inscriber", "ins")
  ,("instrumentalist", "itr")
  ,("interviewee", "ive")
  ,("interviewer", "ivr")
  ,("inventor", "inv")
  ,("issuing body", "isb")
  ,("judge", "jud")
  ,("jurisdiction governed", "jug")
  ,("laboratory", "lbr")
  ,("laboratory director", "ldr")
  ,("landscape architect", "lsa")
  ,("lead", "led")
  ,("lender", "len")
  ,("libelant", "lil")
  ,("libelant-appellant", "lit")
  ,("libelant-appellee", "lie")
  ,("libelee", "lel")
  ,("libelee-appellant", "let")
  ,("libelee-appellee", "lee")
  ,("librettist", "lbt")
  ,("licensee", "lse")
  ,("licensor", "lso")
  ,("lighting designer", "lgd")
  ,("lithographer", "ltg")
  ,("lyricist", "lyr")
  ,("manufacture place", "mfp")
  ,("manufacturer", "mfr")
  ,("marbler", "mrb")
  ,("markup editor", "mrk")
  ,("metadata contact", "mdc")
  ,("metal-engraver", "mte")
  ,("moderator", "mod")
  ,("monitor", "mon")
  ,("music copyist", "mcp")
  ,("musical director", "msd")
  ,("musician", "mus")
  ,("narrator", "nrt")
  ,("onscreen presenter", "osp")
  ,("opponent", "opn")
  ,("organizer of meeting", "orm")
  ,("originator", "org")
  ,("other", "oth")
  ,("owner", "own")
  ,("panelist", "pan")
  ,("papermaker", "ppm")
  ,("patent applicant", "pta")
  ,("patent holder", "pth")
  ,("patron", "pat")
  ,("performer", "prf")
  ,("permitting agency", "pma")
  ,("photographer", "pht")
  ,("plaintiff", "ptf")
  ,("plaintiff-appellant", "ptt")
  ,("plaintiff-appellee", "pte")
  ,("platemaker", "plt")
  ,("praeses", "pra")
  ,("presenter", "pre")
  ,("printer", "prt")
  ,("printer of plates", "pop")
  ,("printmaker", "prm")
  ,("process contact", "prc")
  ,("producer", "pro")
  ,("production company", "prn")
  ,("production designer", "prs")
  ,("production manager", "pmn")
  ,("production personnel", "prd")
  ,("production place", "prp")
  ,("programmer", "prg")
  ,("project director", "pdr")
  ,("proofreader", "pfr")
  ,("provider", "prv")
  ,("publication place", "pup")
  ,("publisher", "pbl")
  ,("publishing director", "pbd")
  ,("puppeteer", "ppt")
  ,("radio director", "rdd")
  ,("radio producer", "rpc")
  ,("recording engineer", "rce")
  ,("recordist", "rcd")
  ,("redaktor", "red")
  ,("renderer", "ren")
  ,("reporter", "rpt")
  ,("repository", "rps")
  ,("research team head", "rth")
  ,("research team member", "rtm")
  ,("researcher", "res")
  ,("respondent", "rsp")
  ,("respondent-appellant", "rst")
  ,("respondent-appellee", "rse")
  ,("responsible party", "rpy")
  ,("restager", "rsg")
  ,("restorationist", "rsr")
  ,("reviewer", "rev")
  ,("rubricator", "rbr")
  ,("scenarist", "sce")
  ,("scientific advisor", "sad")
  ,("screenwriter", "aus")
  ,("scribe", "scr")
  ,("sculptor", "scl")
  ,("second party", "spy")
  ,("secretary", "sec")
  ,("seller", "sll")
  ,("set designer", "std")
  ,("setting", "stg")
  ,("signer", "sgn")
  ,("singer", "sng")
  ,("sound designer", "sds")
  ,("speaker", "spk")
  ,("sponsor", "spn")
  ,("stage director", "sgd")
  ,("stage manager", "stm")
  ,("standards body", "stn")
  ,("stereotyper", "str")
  ,("storyteller", "stl")
  ,("supporting host", "sht")
  ,("surveyor", "srv")
  ,("teacher", "tch")
  ,("technical director", "tcd")
  ,("television director", "tld")
  ,("television producer", "tlp")
  ,("thesis advisor", "ths")
  ,("transcriber", "trc")
  ,("translator", "trl")
  ,("type designer", "tyd")
  ,("typographer", "tyg")
  ,("university place", "uvp")
  ,("videographer", "vdg")
  ,("witness", "wit")
  ,("wood engraver", "wde")
  ,("woodcutter", "wdc")
  ,("writer of accompanying material", "wam")
  ,("writer of added commentary", "wac")
  ,("writer of added lyrics", "wal")
  ,("writer of added text", "wat")
  ]
| nickbart1980/pandoc | src/Text/Pandoc/Writers/EPUB.hs | gpl-2.0 | 50,073 | 0 | 27 | 17,580 | 13,694 | 7,478 | 6,216 | 1,024 | 30 |
{-
# This file is part of matrix-arbitrary. #
# #
# matrix-arbitrary is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# matrix-arbitrary is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# A copy of the GNU General Public License resides in the `LICENSE` #
# file distributed along with matrix-arbitrary. #
# #
# Copyright 2012, Johannes Weiß <[email protected]> #
-}
-- vim: set fileencoding=utf8 :
| weissi/matrix-arbitrary | src/empty-file-template.hs | gpl-3.0 | 1,336 | 0 | 2 | 698 | 4 | 3 | 1 | 1 | 0 |
module Data.Ephys.Timeseries.Filter where
-- Order of constructor arguments supposed to match natural
-- language for specifying filters, e.g. "4th order Lowpass Eliptical filter"
-- | A complete filter specification: how sharp, what shape, which family.
data FilterSpec = FilterSpec Sharpness Response Family
                deriving (Eq, Show)
-- | Frequency response shape; arguments are the corner frequencies
-- (two of them for band-pass/band-stop).
data Response = LowPass Double
              | HighPass Double
              | BandPass Double Double
              | BandStop Double Double
              | Notch Double
              deriving (Eq, Show)
-- | Filter steepness: either an explicit order, or the allowed
-- passband/stopband ripple pair.
data Sharpness = Order Integer
               | PassStopRipple Double Double
               deriving (Eq, Show)
-- | Supported filter design families.
data Family = Butterworth
            | Elliptic
            deriving (Eq, Show)
| imalsogreg/tetrode-ephys | lib/Data/Ephys/Timeseries/Filter.hs | gpl-3.0 | 676 | 0 | 6 | 224 | 126 | 73 | 53 | 15 | 0 |
module Hledger (
module Hledger.Data
,module Hledger.Read
,module Hledger.Reports
,module Hledger.Utils
)
where
import Hledger.Data
import Hledger.Read
import Hledger.Reports
import Hledger.Utils
| Lainepress/hledger | hledger-lib/Hledger.hs | gpl-3.0 | 257 | 0 | 5 | 82 | 50 | 32 | 18 | 9 | 0 |
module AllTests (tests) where
import Data.Aeson
import Data.Text hiding (length)
import Grapher
import Jparse
import Test.Tasty
import Test.Tasty.QuickCheck as QC
-- | Use a sized generator to generate a list of values whose combined size
-- matches the given number.
-- divideBetween :: (Int -> Gen a) -> Int -> Gen [a]
-- divideBetween f 0 = return []
-- divideBetween f n = do size <- choose (1, abs n)
-- head <- f size
-- tail <- divideBetween f (n - size)
-- return (head : tail)
-- you can use that to generate a list of recursive calls
-- data ASTId =
-- ASTId { name :: Text
-- , modu :: Text
-- , package :: Text
-- , dependencies :: Maybe [ASTId]
-- } deriving (Show, Generic, Eq)
-- | Generator for non-empty lists with geometrically distributed
-- length: either a singleton, or a cons onto a recursively generated
-- tail, chosen with equal probability.
geomList :: Gen a -> Gen [a]
geomList g = oneof [ (: []) <$> g, (:) <$> g <*> geomList g ]
-- | Generate arbitrary 'Text' by packing an arbitrary 'String'.
instance Arbitrary Text where
  arbitrary = pack <$> arbitrary
-- | Generate an 'ASTId' whose dependency list is either absent or a
-- geometrically sized list of recursively generated children.
instance Arbitrary ASTId where
  arbitrary = do
    deps  <- oneof [pure Nothing, Just <$> geomList arbitrary]
    name' <- arbitrary
    modu' <- arbitrary
    pkg   <- arbitrary
    return (ASTId name' modu' pkg deps)
-- | Top-level test tree for the suite.
tests :: TestTree
tests = testGroup "Tests" [properties]
-- | QuickCheck properties: JSON round-tripping and graph extraction.
properties :: TestTree
properties =
  testGroup "Properties"
    [ QC.testProperty "parsed" prop_parse_correct
    , QC.testProperty "extracted" prop_extract_correct
    ]
-- | Encoding then decoding an 'ASTId' yields the original value.
prop_parse_correct :: ASTId -> Bool
prop_parse_correct x = decode (encode x) == Just x
-- | 'extractGraphable' yields exactly one edge target per declared
-- dependency, and none when the dependency list is absent.
prop_extract_correct :: ASTId -> Bool
prop_extract_correct astId@(ASTId _ _ _ deps) =
  let (_, _, targets) = extractGraphable astId
  in case deps of
       Nothing -> null targets
       Just ds -> length ds == length targets
| ouanixi/order-deps | test-suite/AllTests.hs | gpl-3.0 | 1,953 | 0 | 12 | 664 | 419 | 225 | 194 | 29 | 1 |
-- | Unify support for type ASTs
{-# LANGUAGE NoImplicitPrelude #-}
module Lamdu.Infer.Internal.Unify
( unifyUnsafe
) where
import Prelude.Compat
import Control.Lens.Operators
import Control.Monad (when, unless)
import Control.Monad.Trans.Class (lift)
import Control.Monad.Trans.State (StateT, evalStateT)
import qualified Control.Monad.Trans.State as State
import qualified Data.Foldable as Foldable
import Data.Map (Map)
import qualified Data.Map as Map
import qualified Data.Set as Set
import Lamdu.Expr.FlatComposite (FlatComposite(..))
import qualified Lamdu.Expr.FlatComposite as FlatComposite
import Lamdu.Expr.Type (Type)
import qualified Lamdu.Expr.Type as T
import qualified Lamdu.Expr.TypeVars as TV
import qualified Lamdu.Infer.Error as Err
import Lamdu.Infer.Internal.Monad (Infer)
import qualified Lamdu.Infer.Internal.Monad as M
import Lamdu.Infer.Internal.Scope (SkolemScope(..))
import qualified Lamdu.Infer.Internal.Scope as Scope
import Lamdu.Infer.Internal.Subst (Subst, CanSubst)
import qualified Lamdu.Infer.Internal.Subst as Subst
import Text.PrettyPrint.HughesPJClass (Pretty(..))
-- | Unify two types in the Infer monad.  "Unsafe" presumably means
-- callers must uphold an invariant (e.g. the current substitution is
-- already applied) -- TODO confirm against call sites.
{-# INLINE unifyUnsafe #-}
unifyUnsafe :: Type -> Type -> Infer ()
unifyUnsafe = unifyGeneric
-- | Bind variable @u@ to term @t@ in the accumulated substitution,
-- enforcing the occurs check and the skolem-escape rules.
varBind :: (Eq t, M.VarKind t, Pretty t) => T.Var t -> t -> Infer ()
varBind u t
    -- Binding a variable to itself is a no-op.
    | mtv == Just u = return ()
    | otherwise =
        do
            allSkolems <- M.getSkolems
            -- Skolems occurring in t, and the remaining (non-skolem)
            -- free variables of t, split by kind.
            let tSkolems = TV.intersection allSkolems tFree
            let TV.TypeVars tvs rtvs stvs = tFree `TV.difference` tSkolems
            case (TV.member u allSkolems, mtv) of
                -- u is an ordinary unification variable:
                (False, _) ->
                    do
                        uAllowedSkolems <- M.getSkolemsInScope u
                        -- Every free non-skolem var of t is narrowed to
                        -- u's skolem scope, since it now stands for u.
                        let narrow nonSkolems =
                                mapM_ (M.narrowTVScope uAllowedSkolems)
                                (Set.toList nonSkolems)
                        narrow tvs >> narrow rtvs >> narrow stvs
                        -- t must not mention skolems u cannot see.
                        let unallowedSkolems =
                                tSkolems `TV.difference`
                                (uAllowedSkolems ^. Scope.skolemScopeVars)
                        unless (TV.null unallowedSkolems) $
                            M.throwError Err.SkolemEscapesScope
                        -- in my scope: tSkolems
                        -- Occurs check: u must not appear free in t.
                        when (u `TV.member` tFree) $
                            M.throwError $ Err.OccursCheckFail (pPrint u) (pPrint t)
                        M.tellSubst u t
                -- u is a skolem and t is not a variable: a skolem can
                -- only unify with a variable.
                (True, Nothing) -> M.throwError $ Err.SkolemNotPolymorphic (pPrint u) (pPrint t)
                (True, Just tv)
                    -- Two distinct skolems can never be unified.
                    | TV.member tv allSkolems -> M.throwError $ Err.SkolemsUnified (pPrint u) (pPrint t)
                    -- Bind the *other* (ordinary) variable to skolem u,
                    -- provided u is in its allowed skolem scope.
                    | otherwise ->
                        do
                            SkolemScope tvAllowedSkolems <- M.getSkolemsInScope tv
                            unless (u `TV.member` tvAllowedSkolems) $
                                M.throwError Err.SkolemEscapesScope
                            M.tellSubst tv (TV.lift u)
    where
        tFree = TV.free t
        -- Just v when t is itself a lone variable.
        mtv = TV.unlift t
-- | Terms that support unification, recording bindings via 'Infer'.
class CanSubst t => Unify t where
    unifyGeneric :: t -> t -> Infer ()
-- | Wrap a field map as a closed composite (no row-extension variable).
closedRecord :: Map T.Tag Type -> T.Composite p
closedRecord fields =
    FlatComposite.toComposite $ FlatComposite fields Nothing
-- | Unify an open composite (fields + row variable @tname@) with a
-- closed one.  The closed side must contain every field of the open
-- side; the row variable is then bound to the leftover closed fields.
-- Shared fields are unified pointwise by the caller ('unifyFlattened').
unifyFlatToPartial ::
    M.CompositeHasVar p =>
    Subst -> (Map T.Tag Type, T.Var (T.Composite p)) -> Map T.Tag Type ->
    Infer ()
unifyFlatToPartial s (tfields, tname) ufields
    -- A field on the open side that the closed side lacks is a mismatch.
    | not (Map.null uniqueTFields) =
        M.throwError $
        Err.TypesDoNotUnity
        (pPrint (FlatComposite.toComposite (FlatComposite tfields (Just tname))))
        (pPrint (closedRecord ufields))
    | otherwise =
        varBind tname $
        Subst.apply s $
        FlatComposite.toComposite $ FlatComposite uniqueUFields Nothing
    where
        uniqueTFields = tfields `Map.difference` ufields
        uniqueUFields = ufields `Map.difference` tfields
-- | Unify two open composites.  Shared fields are unified pointwise by
-- the caller; here each row variable is bound to the fields unique to
-- the *other* side, extended with a fresh row variable common to both.
unifyFlatPartials ::
    M.CompositeHasVar p =>
    Subst ->
    (Map T.Tag Type, T.Var (T.Composite p)) ->
    (Map T.Tag Type, T.Var (T.Composite p)) ->
    Infer ()
unifyFlatPartials s0 (tfields, tname) (ufields, uname) =
    do
        tScope <- M.getSkolemsInScope tname
        uScope <- M.getSkolemsInScope uname
        -- The fresh row var may only see skolems visible to BOTH sides.
        restTv <- M.freshInferredVar (tScope `Scope.skolemScopeIntersection` uScope) "r"
        ((), s1) <-
            M.listenSubst $ varBind tname $
            Subst.apply s0 $
            -- foldrWithKey replaces the long-deprecated foldWithKey
            -- (removed in containers >= 0.6); it is the documented
            -- drop-in equivalent with identical fold order.
            Map.foldrWithKey T.CExtend restTv uniqueUFields
        varBind uname $ Subst.apply (mappend s0 s1) $
            Map.foldrWithKey T.CExtend restTv uniqueTFields
    where
        uniqueTFields = tfields `Map.difference` ufields
        uniqueUFields = ufields `Map.difference` tfields
-- | Unify two closed composites: they must carry exactly the same
-- field set (shared field *types* are unified pointwise by the caller).
unifyFlatFulls ::
    Map T.Tag Type -> Map T.Tag Type -> Infer ()
unifyFlatFulls tfields ufields
    | Map.keys tfields == Map.keys ufields = return mempty
    | otherwise =
        M.throwError $
        Err.TypesDoNotUnity
        (pPrint (closedRecord tfields))
        (pPrint (closedRecord ufields))
-- | Unify one pair of children, threading an accumulated substitution:
-- both sides are rewritten by the substitution so far before unifying,
-- and the new bindings are appended to it.
unifyChild :: Unify t => t -> t -> StateT Subst Infer ()
unifyChild t u =
    do
        old <- State.get
        ((), s) <- lift $ M.listenSubst $ unifyGeneric (Subst.apply old t) (Subst.apply old u)
        State.put (old `mappend` s)
-- | Pointwise unification of the entries present in BOTH maps; entries
-- unique to either side are ignored here.
unifyIntersection :: (Unify a, Ord k) => Map k a -> Map k a -> Infer ()
unifyIntersection tfields ufields =
    (`evalStateT` mempty) . Foldable.sequence_ $
    Map.intersectionWith unifyChild tfields ufields
-- | Unify two flattened composites: first the shared fields, then
-- dispatch on whether each side is open (has a row variable) or closed.
unifyFlattened :: M.CompositeHasVar p => FlatComposite p -> FlatComposite p -> Infer ()
unifyFlattened
    (FlatComposite tfields tvar)
    (FlatComposite ufields uvar) =
        do
            ((), s) <- M.listenSubst $ unifyIntersection tfields ufields
            case (tvar, uvar) of
                (Nothing , Nothing ) -> unifyFlatFulls tfields ufields
                (Just tname, Just uname) -> unifyFlatPartials s (tfields, tname) (ufields, uname)
                (Just tname, Nothing ) -> unifyFlatToPartial s (tfields, tname) ufields
                (Nothing , Just uname) -> unifyFlatToPartial s (ufields, uname) tfields
-- | Fail with a type-mismatch error for two terms that cannot unify.
dontUnify :: Pretty t => t -> t -> Infer ()
dontUnify x y = M.throwError (Err.TypesDoNotUnity (pPrint x) (pPrint y))
instance Unify Type where
    -- Functions: unify the argument types, then the result types under
    -- the substitution the first unification produced.
    unifyGeneric (T.TFun l r) (T.TFun l' r') =
        do
            ((), s1) <- M.listenSubst $ unifyGeneric l l'
            unifyGeneric
                (Subst.apply s1 r)
                (Subst.apply s1 r')
    -- Nominal instantiations: same constructor and the same parameter
    -- keys -> unify the parameter types pointwise.
    unifyGeneric (T.TInst c0 p0) (T.TInst c1 p1)
        | c0 == c1 && Map.keys p0 == Map.keys p1 = unifyIntersection p0 p1
    unifyGeneric (T.TVar u) t = varBind u t
    unifyGeneric t (T.TVar u) = varBind u t
    unifyGeneric (T.TRecord x) (T.TRecord y) = unifyGeneric x y
    unifyGeneric (T.TSum x) (T.TSum y) = unifyGeneric x y
    unifyGeneric (T.TPrim x) (T.TPrim y) | x == y = return ()
    unifyGeneric t1 t2 = dontUnify t1 t2
instance M.CompositeHasVar p => Unify (T.Composite p) where
    unifyGeneric T.CEmpty T.CEmpty = return ()
    unifyGeneric (T.CVar u) t = varBind u t
    unifyGeneric t (T.CVar u) = varBind u t
    -- Extensions starting with the same field: unify the field types,
    -- then the tails; otherwise fall back to order-insensitive
    -- unification of the flattened field maps.
    unifyGeneric
        t@(T.CExtend f0 t0 r0)
        u@(T.CExtend f1 t1 r1)
        | f0 == f1 =
            do
                ((), s) <- M.listenSubst $ unifyGeneric t0 t1
                unifyGeneric (Subst.apply s r0) (Subst.apply s r1)
        | otherwise =
            unifyFlattened
            (FlatComposite.fromComposite t)
            (FlatComposite.fromComposite u)
    unifyGeneric t1 t2 = dontUnify t1 t2
| da-x/Algorithm-W-Step-By-Step | Lamdu/Infer/Internal/Unify.hs | gpl-3.0 | 7,879 | 0 | 19 | 2,555 | 2,498 | 1,274 | 1,224 | 167 | 4 |
module UCeuler2
( solve
) where
-- With an anonymus function that takes a pair (a, b) in a tuple and
-- returns a new tuple with (b (+ a b)).
-- We iterate over that function, i.e the output is the new input.
-- The first position in each pair is the Fibonacci sequence
-- that is map(ped) to a new lazy sequence
-- The $ operator is for avoiding parenthesis. Anything appearing
-- after it will take precedence over anything that comes before.
fibonacci :: [Integer]
fibonacci = map fst $ iterate (\(a,b) -> (b,a+b)) (0,1)
-- Lazy sub-sequence of the even Fibonacci numbers.
even_fibonacci :: [Integer]
even_fibonacci = filter even fibonacci
-- | Render the answer to Project Euler use case @ucid@: the sum of the
-- even-valued Fibonacci numbers below four million.
solve :: Int -> [Char]
solve ucid =
  "Solved UC " ++ show ucid ++ ": Result is: " ++ show answer
  where
    answer = sum (takeWhile (< 4000000) even_fibonacci)
--------------------------------------------------------------------------------
-- This file is part of diplomarbeit ("Diplomarbeit Johannes Weiß"). --
-- --
-- diplomarbeit is free software: you can redistribute it and/or modify --
-- it under the terms of the GNU General Public License as published by --
-- the Free Software Foundation, either version 3 of the License, or --
-- (at your option) any later version. --
-- --
-- diplomarbeit is distributed in the hope that it will be useful, --
-- but WITHOUT ANY WARRANTY; without even the implied warranty of --
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --
-- GNU General Public License for more details. --
-- --
-- You should have received a copy of the GNU General Public License --
-- along with diplomarbeit. If not, see <http://www.gnu.org/licenses/>. --
-- --
-- Copyright 2012, Johannes Weiß --
--------------------------------------------------------------------------------
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE BangPatterns #-}
module Math.Polynomials (monomial, horner) where
import Data.List (foldl')
-- | Evaluate a polynomial using /Horner's rule/:
-- @horner x [a, b, c] = a + x*(b + x*c)@.
horner :: (Num a)
       => a   -- ^ The polynomial's variable, such as /x/
       -> [a] -- ^ The polynomial's coefficients, lowest power first.
       -> a   -- ^ Polynomial built using /Horner's rule/.
horner x coeffs = foldr step 0 coeffs
  where
    -- Each step folds one coefficient into the running tail value.
    step coeff acc = coeff + acc * x
-- | Evaluate a polynomial in monomial form: the i-th coefficient is
-- multiplied by x^i and all terms are summed (strictly, left to right).
monomial :: Num a
         => a   -- ^ The polynomial's variable, such as /x/
         -> [a] -- ^ The polynomial's coefficients, lowest power first.
         -> a   -- ^ Resulting Polynomial
monomial x coeffs = foldl' (+) 0 (zipWith term coeffs powers)
  where
    powers = [0 ..] :: [Integer]
    term coeff p = coeff * x ^ p
| weissi/diplomarbeit | lib/Math/Polynomials.hs | gpl-3.0 | 2,372 | 0 | 12 | 885 | 253 | 153 | 100 | 21 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Drive.Revisions.Get
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Gets a revision\'s metadata or content by ID.
--
-- /See:/ <https://developers.google.com/drive/ Drive API Reference> for @drive.revisions.get@.
module Network.Google.Resource.Drive.Revisions.Get
(
-- * REST Resource
RevisionsGetResource
-- * Creating a Request
, revisionsGet
, RevisionsGet
-- * Request Lenses
, rggAcknowledgeAbuse
, rggFileId
, rggRevisionId
) where
import Network.Google.Drive.Types
import Network.Google.Prelude
-- | A resource alias for @drive.revisions.get@ method which the
-- 'RevisionsGet' request conforms to.
-- Two alternative routes over the same path: the first returns the
-- revision's JSON metadata (alt=json), the second streams the raw
-- revision content (alt=media).
type RevisionsGetResource =
     "drive" :>
       "v3" :>
         "files" :>
           Capture "fileId" Text :>
             "revisions" :>
               Capture "revisionId" Text :>
                 QueryParam "acknowledgeAbuse" Bool :>
                   QueryParam "alt" AltJSON :> Get '[JSON] Revision
       :<|>
       "drive" :>
         "v3" :>
           "files" :>
             Capture "fileId" Text :>
               "revisions" :>
                 Capture "revisionId" Text :>
                   QueryParam "acknowledgeAbuse" Bool :>
                     QueryParam "alt" AltMedia :>
                       Get '[OctetStream] Stream
-- | Gets a revision\'s metadata or content by ID.
--
-- /See:/ 'revisionsGet' smart constructor.
data RevisionsGet = RevisionsGet'
    { _rggAcknowledgeAbuse :: !Bool
    , _rggFileId :: !Text
    , _rggRevisionId :: !Text
    } deriving (Eq,Show,Data,Typeable,Generic)
-- | Creates a value of 'RevisionsGet' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'rggAcknowledgeAbuse'
--
-- * 'rggFileId'
--
-- * 'rggRevisionId'
revisionsGet
    :: Text -- ^ 'rggFileId'
    -> Text -- ^ 'rggRevisionId'
    -> RevisionsGet
revisionsGet pRggFileId_ pRggRevisionId_ =
    -- acknowledgeAbuse defaults to False; it only matters for
    -- alt=media downloads (see 'rggAcknowledgeAbuse').
    RevisionsGet'
    { _rggAcknowledgeAbuse = False
    , _rggFileId = pRggFileId_
    , _rggRevisionId = pRggRevisionId_
    }
-- | Whether the user is acknowledging the risk of downloading known malware
-- or other abusive files. This is only applicable when alt=media.
rggAcknowledgeAbuse :: Lens' RevisionsGet Bool
rggAcknowledgeAbuse
  = lens _rggAcknowledgeAbuse
      (\ s a -> s{_rggAcknowledgeAbuse = a})
-- | The ID of the file.
rggFileId :: Lens' RevisionsGet Text
rggFileId
  = lens _rggFileId (\ s a -> s{_rggFileId = a})
-- | The ID of the revision.
rggRevisionId :: Lens' RevisionsGet Text
rggRevisionId
  = lens _rggRevisionId
      (\ s a -> s{_rggRevisionId = a})
-- Metadata request: uses the JSON (first) branch of 'RevisionsGetResource'.
instance GoogleRequest RevisionsGet where
        type Rs RevisionsGet = Revision
        type Scopes RevisionsGet =
             '["https://www.googleapis.com/auth/drive",
               "https://www.googleapis.com/auth/drive.appdata",
               "https://www.googleapis.com/auth/drive.file",
               "https://www.googleapis.com/auth/drive.metadata",
               "https://www.googleapis.com/auth/drive.metadata.readonly",
               "https://www.googleapis.com/auth/drive.photos.readonly",
               "https://www.googleapis.com/auth/drive.readonly"]
        requestClient RevisionsGet'{..}
          = go _rggFileId _rggRevisionId
              (Just _rggAcknowledgeAbuse)
              (Just AltJSON)
              driveService
          -- ':<|>' pattern selects the JSON branch; the media branch is discarded.
          where go :<|> _
                  = buildClient (Proxy :: Proxy RevisionsGetResource)
                      mempty
-- Content download: same scopes, but uses the media (second) branch
-- of 'RevisionsGetResource' and returns a raw 'Stream'.
instance GoogleRequest (MediaDownload RevisionsGet)
         where
        type Rs (MediaDownload RevisionsGet) = Stream
        type Scopes (MediaDownload RevisionsGet) =
             Scopes RevisionsGet
        requestClient (MediaDownload RevisionsGet'{..})
          = go _rggFileId _rggRevisionId
              (Just _rggAcknowledgeAbuse)
              (Just AltMedia)
              driveService
          -- ':<|>' pattern selects the media branch; the JSON branch is discarded.
          where _ :<|> go
                  = buildClient (Proxy :: Proxy RevisionsGetResource)
                      mempty
| rueshyna/gogol | gogol-drive/gen/Network/Google/Resource/Drive/Revisions/Get.hs | mpl-2.0 | 4,782 | 0 | 24 | 1,316 | 661 | 375 | 286 | 102 | 1 |
-- | Build \"X. Y.\"-style initials from a first and a last name.
-- Total: the original definition only matched non-empty names and crashed
-- with a pattern-match failure on @\"\"@; an empty name now yields a
-- @\"?.\"@ placeholder instead.
initials :: [Char] -> [Char] -> [Char]
initials firstname lastname = initialOf firstname ++ " " ++ initialOf lastname
  where
    -- First character followed by a period; '?' when the name is empty.
    initialOf (c:_) = [c, '.']
    initialOf []    = "?."
| ekalosak/haskell-practice | helloWorld/baby2.hs | lgpl-3.0 | 86 | 0 | 7 | 19 | 65 | 36 | 29 | 2 | 1 |
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE EmptyCase #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE PackageImports #-}
{-# LANGUAGE PatternSynonyms #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
module TTG where
import Test.HUnit (Counts, Test (TestList), runTestTT)
import qualified Test.HUnit.Util as U (t)
------------------------------------------------------------------------------
{-
Trees That Grow
Shayan Najd, Simon Peyton Jones
https://www.microsoft.com/en-us/research/wp-content/uploads/2016/11/trees-that-grow.pdf
use type-level functions to enable (G)ADT extensibility
-}
------------------------------------------------------------------------------
-- Object-language variables are plain strings.
type Var = String
-- Simply-typed lambda-calculus types: Int and function arrows.
data Typ = Int | Fun Typ Typ deriving Eq
-- p 5. not extensible
data Exp
  = Lit Integer
  | Var Var
  | Ann Exp Typ -- type Ann(otation)
  | Abs Var Exp
  | App Exp Exp
------------------------------------------------------------------------------
{- p 6
EXTENSIBLE
ξ is a type index to ExpX.
ξ is known as "extension descriptor"
- describes which extension is in use
Each constructor has "extra" field
- field enables extending a constructor with extra fields
- type of field determined by type-level function
"extra" 'ExpX' constructor
- has one field of type XExp ξ
- used to extend data type with new constructors
-}
-- The extensible expression type: each constructor carries one extension
-- field whose type is computed from the descriptor ξ by the families below.
data ExpX ξ
  -- place for new fields
  --      v
  = LitX (XLit ξ) Integer
  | VarX (XVar ξ) Var
  | AnnX (XAnn ξ) (ExpX ξ) Typ
  | AbsX (XAbs ξ) Var (ExpX ξ)
  | AppX (XApp ξ) (ExpX ξ) (ExpX ξ)
  | ExpX (XExp ξ) -- place for new constructors
-- for specifying type
-- - of new fields
-- - of new constructors
type family XLit ξ
type family XVar ξ
type family XAnn ξ
type family XAbs ξ
type family XApp ξ
type family XExp ξ
------------------------------------------------------------------------------
{-
ExpUD : undecorated (UD) variant of ExpX
does not introduce any forms of extensions, so type-level mappings set to Void
the type instance declarations can be omitted
- without instances, 'XAnn UD' is irreducible: therefore an empty type just like Void
- but possible to accidentally add 'XAnn UD', so prevent by giving explicit instance
-}
data Void -- no inhabitants
-- A named bottom; forcing it is a bug (extension fields are never inspected).
void :: Void
void = error "Attempt to evaluate void"
-- Ex falso: eliminates 'Void' via EmptyCase.
absurd :: Void -> a
absurd m = case m of { }
data UD -- no inhabitants
-- Undecorated variant: every extension point is explicitly closed off with Void.
type ExpUD = ExpX UD
type instance XLit UD = Void
type instance XVar UD = Void
type instance XAnn UD = Void
type instance XAbs UD = Void
type instance XApp UD = Void
type instance XExp UD = Void
------------------------------------------------------------------------------
{- p 7
Pattern Synonyms for Convenience
program with ExpX
– pattern matching : must ignore extra field
– constructing : must supply void in extra field
-}
-- | Increment the payload of a 'Lit'; any other expression is returned unchanged.
incLit :: Exp -> Exp
incLit e = case e of
  Lit i -> Lit (i + 1)
  _     -> e
-- | Same as 'incLit' but on the undecorated extensible tree: matching must
-- ignore the extension field and construction must supply 'void' for it.
incLitX :: ExpUD -> ExpUD
incLitX e = case e of
  LitX _ i -> LitX void (i + 1)
  _        -> e
-- use pattern synonyms
-- Pattern synonym hiding the extension field: matching drops it,
-- construction fills it with 'void'.
pattern LitUD :: Integer -> ExpUD
-- bidirectional
-- | for matching     |   | for constructing |
pattern LitUD i <- LitX _ i where LitUD i = LitX void i
-- | 'incLitX' rewritten against the 'LitUD' pattern synonym, so neither the
-- match nor the construction mentions the extension field.
incLitX' :: ExpUD -> ExpUD
incLitX' e = case e of
  LitUD i -> LitUD (i + 1)
  _       -> e
{-
note: term value ExpX void has no counterpart in Exp
- Haskell lacks an entirely uninhabited type
- therefore, hide ExpX constructor from client users
-}
------------------------------------------------------------------------------
{- p 8 NEW FIELD EXTENSION : e.g., hold TypeCheck (TC) info on App(lication)
-}
data TC
-- Type-checking variant: identical to UD except 'App' nodes gain a 'Typ' field.
type ExpTC = ExpX TC
type instance XLit TC = Void
type instance XVar TC = Void
type instance XAnn TC = Void
type instance XAbs TC = Void
type instance XApp TC = Typ -- enables extra field of this type
type instance XExp TC = Void
-- Bidirectional synonyms for the TC variant; Void extension fields are
-- ignored on match and filled with 'void' on construction.
pattern LitTC :: Integer -> ExpTC
pattern LitTC i <- LitX _ i where LitTC i = LitX void i
pattern VarTC :: Var -> ExpTC
pattern VarTC v <- VarX _ v where VarTC v = VarX void v
pattern AnnTC :: ExpTC -> Typ -> ExpTC
pattern AnnTC e t <- AnnX _ e t where AnnTC e t = AnnX void e t
pattern AbsTC :: Var-> ExpTC-> ExpTC
pattern AbsTC l r <- AbsX _ l r where AbsTC l r = AbsX void l r
-- | Bidirectional synonym for type-annotated application. The 'Typ' is the
-- XApp TC extension field (the argument type recorded by the checker).
-- Fix: the explicit builder previously read @AppTC l m = AppX l m@, dropping
-- the 'Typ' argument — an arity/type mismatch with the declared signature
-- and with the three-argument uses in 'check' and 'ttc'.
pattern AppTC :: Typ -> ExpTC -> ExpTC -> ExpTC
pattern AppTC a l m <- AppX a l m where AppTC a l m = AppX a l m
{- p 9 FUNCTIONS ON EXTENDED DATA TYPES
e.g., type check
-}
-- Bidirectional-style type check: does the expression have type @c@ in @env@?
-- For 'AppTC' the recorded 'Typ' @t@ is the argument type, so the function
-- must have type @Fun t c@.
check :: ExpTC -> [(Var,Typ)] -> Typ -> Bool
check (LitTC _) _ Int = True
check (VarTC v) env c = (== Just c) (lookup v env)
check (AnnTC e t) env c = t == c && check e env c
check (AbsTC v e) env (Fun a b) = check e ((v,a):env) b
check (AppTC t f a) env c = check f env (Fun t c) && check a env t
check _ _ _ = False -- GHC does not yet know when synonyms are exhaustive
-- HUnit cases exercising 'check' over each syntactic form.
ttc :: [Test]
ttc = U.t "ttc"
  (all
    (True==)
    [ check (LitTC 3) [] Int
    , check (VarTC "x") [("x",Int)] Int
    , not $ check (VarTC "x") [("x",Fun Int Int)] Int
    , not $ check (VarTC "x") [] Int
    , check (AnnTC (VarTC "x") Int) [("x",Int)] Int
    , check (AbsTC "x" (VarTC "x")) [] (Fun Int Int)
    , check (AppTC Int (AbsTC "x" (VarTC "x")) (LitTC 3)) [] Int
    ])
  True
------------------------------------------------------------------------------
{- p 8 NEW CONSTRUCTOR EXTENSION : e.g., hold TypeCheck (TC) info on App(lication)
e.g.,
- partial evaluation (PE) pass over trees: β-redices normalised away
- after reducing, PE stores value as node in tree
- stored in new contructor in non extensible version
data Val = ...
data Exp = ... | Val Val
- extensible version will define new constructor ValPE
-}
-- Partial-evaluation variant: no field extensions, but one new constructor
-- ('ValPE') holding an already-normalised value.
newtype Val = Val ExpPE -- deriving Show -- not done just because need to do show for ExrPE
data PE
type ExpPE = ExpX PE
type instance XLit PE = Void
type instance XVar PE = Void
type instance XAnn PE = Void
type instance XAbs PE = Void
type instance XApp PE = Void
type instance XExp PE = Val
-- represents new constructor introduced by extension
pattern ValPE :: Val -> ExpPE
pattern ValPE v = ExpX v
------------------------------------------------------------------------------
{- p 10 Generic Functions on Extensible Data Types
to define generic functions: use common structure of extensible data type
e.g., generic print
- print that ignores new field : works same for both ExpUD and ExpTC
- new constructor extensions
- could ignore, or
- pass funcetion to to handle new constructors (done below)
- could have used type classes
ExpUD and ExpTC have no new constructors
- args passed to generic printE matches empty types
ExpPE
- pass print fun for new constructor 'Val'
-}
-- | Render a 'Typ'; function types parenthesise their domain.
printT :: Typ -> String
printT t = case t of
  Int     -> "Int"
  Fun a b -> concat ["(", printT a, ") -> ", printT b]
-- Generic printer over any extension ξ: field extensions are ignored;
-- the caller supplies @p@ to render constructor extensions (XExp ξ).
printE :: (XExp ξ -> String) -> ExpX ξ -> String
printE _ (LitX _ i) = show i
printE _ (VarX _ x) = x
printE p (AnnX _ m a) = "(" ++ printE p m ++ ") :: (" ++ printT a ++ ")"
printE p (AbsX _ x n) = "λ" ++ x ++ "." ++ printE p n
printE p (AppX _ l m) = "(" ++ printE p l ++ ")(" ++ printE p m ++ ")"
printE p (ExpX ξ) = p ξ
-- UD/TC have no constructor extensions (XExp is Void), so 'absurd' suffices.
printEUD :: ExpUD -> String
printEUD = printE absurd
printETC :: ExpTC -> String
printETC = printE absurd
-- printEPE :: ExpPE -> String -- not done (see above about Show ExrPE)
-- printEPE = printE p where p v = "{{" ++ show v ++ "}}"
-- Printer unit tests; 'undefined' is safe here because the TC variant has
-- no constructor extensions, so the handler argument is never applied.
tpl,tpv,tpan,tpab,tpap::[Test]
tpl = U.t "tpl" (printE undefined (LitTC 3)) "3"
tpv = U.t "tpv" (printE undefined (VarTC "x")) "x"
tpan = U.t "tpan" (printE undefined (AnnTC (VarTC "x") Int)) "(x) :: (Int)"
tpab = U.t "tpab" (printE undefined (AbsTC "x" (VarTC "x"))) "\955x.x"
tpap = U.t "tpap" (printE undefined (AppTC Int (AbsTC "x" (VarTC "x")) (LitTC 3))) "(\955x.x)(3)"
------------------------------------------------------------------------------
{- p 11 Type Classes for Extensible Data Types
To print field extensions, could pass fun args for all field extensions:
printE :: (XLit ξ → String) → (XVar ξ → String) → (XAnn ξ → String)
→ (XAbs ξ → String) → (XApp ξ → String) → (XExp ξ → String)
→ ExpX ξ → String
Alternative via type classes.
-}
-- Illustrates constraining every extension point; the body is deliberately
-- left 'undefined' — the paper's point is the instance head, not the method.
instance ( Show (XLit ξ), Show (XVar ξ), Show (XAnn ξ)
         , Show (XAbs ξ), Show (XApp ξ), Show (XExp ξ)) => Show (ExpX ξ) where
  show = undefined
{-
then no explicit args necessary.
Use ConstraintKinds can abstract over the constraint
type forall x . (φ :: ∗ -> Constraint) ξ
= ( φ (XLit ξ), φ (XVar ξ), φ (XAnn ξ)
, φ (XAbs ξ), φ (XApp ξ), φ (XExp ξ)
)
then header of previous instance becomes
instance forall X Show ξ => Show (ExpX ξ) where
show = ...
can use Haskell’s standalone deriving
deriving instance forall X . Show ξ => Show (ExpX ξ)
-}
------------------------------------------------------------------------------
-- Run every test group in this module under HUnit.
test :: IO Counts
test =
    runTestTT $ TestList $
    ttc ++
    tpl ++ tpv ++ tpan ++ tpab ++ tpap
| haroldcarr/learn-haskell-coq-ml-etc | haskell/topic/trees-that-grow-and-shrink/2017-09-spj-trees-that-grow/TTG.hs | unlicense | 9,699 | 0 | 15 | 2,520 | 2,088 | 1,125 | 963 | -1 | -1 |
module Functions where
-- | Monadic join: collapse one layer of monadic structure.
j :: Monad m => m (m a) -> m a
j mm = do
  inner <- mm
  inner
-- | 'fmap' expressed with bind (equivalent for lawful monads).
l1 :: Monad m => (a -> b) -> m a -> m b
l1 f ma = ma >>= \x -> return (f x)
-- | Lift a binary function over two monadic actions (liftM2, spelt out).
l2 :: Monad m => (a -> b -> c) -> m a -> m b -> m c
l2 f ma mb = do
  x <- ma
  y <- mb
  return (f x y)
-- | Flipped 'ap': run the function action, then map it over the value action.
a :: Monad m => m a -> m (a -> b) -> m b
a ma mf = do
  f <- mf
  f <$> ma
-- | Flipped 'mapM': run @f@ over each element, collecting results in order.
meh :: Monad m => [a] -> (a -> m b) -> m [b]
meh as f = go as
  where
    go []     = return []
    go (x:xs) = do
      y  <- f x
      ys <- go xs
      return (y : ys)
-- | 'meh' via map-then-sequence: apply @f@ first, then thread the effects.
meh' :: Monad m => [a] -> (a -> m b) -> m [b]
meh' as f = foldr step (return []) (map f as)
  where
    step mb mbs = do
      y  <- mb
      ys <- mbs
      return (y : ys)
-- | 'sequence': run each action in order and collect the results.
flipType :: (Monad m) => [m a] -> m [a]
flipType = foldr cons (return [])
  where
    cons mx acc = do
      x  <- mx
      xs <- acc
      return (x : xs)
-- | 'sequence' again, with the step written using explicit binds.
flipType' :: (Monad m) => [m a] -> m [a]
flipType' = foldr cons (return [])
  where
    cons mx mxs = mx >>= \x -> mxs >>= \xs -> return (x : xs)
| thewoolleyman/haskellbook | 18/07/haskell-club/Functions.hs | unlicense | 650 | 0 | 10 | 194 | 434 | 222 | 212 | 17 | 1 |
{-
Copyright 2015 Tristan Aubrey-Jones
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-}
{-|
Copyright : (c) Tristan Aubrey-Jones, 2015
License : Apache-2
Maintainer : [email protected]
Stability : experimental
For more information please see <http://www.flocc.net/>
-}
module Compiler.Back.ControlTemplates (ctrlTemplates) where
import qualified Data.Map as DM
import Control.Monad.State.Strict (gets)
import Compiler.Back.Graph
import Compiler.Back.GenDecls
import Compiler.Back.Gen
import Compiler.Back.Helper
import Compiler.Back.Templates
import Control.Monad.Catch
-- | Registry of control-flow templates, keyed by the name used in the DSL.
ctrlTemplates :: (Monad m, MonadCatch m) => [(Id, Template m)]
ctrlTemplates = [
    ("ifFun", ifT),
    ("loop", t01)
  ]
-- |ifFun template (for conditionals)
-- | Template for @ifFun@: matches a (predicate, then-branch, else-branch)
-- tuple where both branches take a null argument and agree on the result
-- type, then emits a C-style @if/else@ that assigns the shared result var.
ifT :: (Monad m, MonadCatch m) => Template m
ifT (Tup [t1, (t2 :-> t3), (t4 :-> t5)] :-> t6)
    (LFun _ (LTup _ [predN, thenN, elseN]) thisN)
  | t1 == boolTy && match nullTy [t2,t4] &&
    match t6 [t3,t5] = do
    -- get predicate var
    getVar (Lf "predV") predN outVarName
    -- declare result var
    ifnVar "decOut" outVarName (Lf "res") t3
    -- gen then/else blocks
    newVar (Lf "nullV") nullTy
    genFunV "thenCode" thenN (Lf "nullV") n0 (Lf "res")
    genFunV "elseCode" elseN (Lf "nullV") n0 (Lf "res")
    outputDecl thisN "<decOut>"
    -- gen if block: code emission is deferred until "gen" is invoked
    setFun thisN "gen" nt (\_ -> do
      -- outputDecl thisN "<decOut>"
      output "main" $
        "// begin <tem>\n" ++
        "if (<predV>) {\n<thenCode>\n} else {\n<elseCode>\n}\n"++
        "// end <tem>\n"
      return ())
-- Any other type/node shape is a template error.
ifT t n = terr' t n
-- |while template
-- |TODO copy public members from output of nextf to input of nextf?
-- | Template for @loop@ (a while-style iteration): takes a step function, a
-- continuation predicate, and an initial state, all over the same state type
-- (modulo function decorations). Emits double-buffered C code that ping-pongs
-- the state between v1 and v2 so the final value always lands in the output
-- buffer v1.
t01 :: (Monad m, MonadCatch m) => Template m
t01 (Tup [st1 :-> st2, st3 :-> boolTy, st4] :-> st5)
    (LFun _ (LTup _ [nextf, predf, v0]) out)
  | match (ignoreFunsTy st1) (map ignoreFunsTy [st2, st3, st4, st5]) = do
    -- get init val
    getVar (Lf "v0") v0 outVarName
    -- TODO get everything in v0's public environment other than the outVar and streamVar
    -- and pass it to genFunV calls
    -- buffer vars
    -- newVar (Lf "v1") st1 --
    ifnVar "decOut" outVarName (Lf "v1") st1
    newVar (Lf "v2") st1
    --runGenV "declareVar" "decBuffs" [Tup [Lf "v1", Lf "v2"]]
    runGenV "declareVar" "decBuffs" [Lf "v2"]
    -- step function instantiated for each buffer pairing used below
    genFunV "appNext0" nextf (Lf "v0") v0 (Lf "v1")
    genFunV "appNext1" nextf (Lf "v1") v0 (Lf "v2")
    genFunV "appNext2" nextf (Lf "v2") v0 (Lf "v1")
    runGenV "assignVar" "copyBuff" [Lf "v1", Lf "v2"]
    runGenV "assignVar" "copyBuff2" [Lf "v1", Lf "v0"]
    -- predicate var
    newVar (Lf "predV") boolTy
    runGenV "declareVar" "decPredV" [Lf "predV"]
    genFunV "appPred0" predf (Lf "v0") v0 (Lf "predV")
    genFunV "appPred1" predf (Lf "v1") v0 (Lf "predV")
    genFunV "appPred2" predf (Lf "v2") v0 (Lf "predV")
    -- get env vars (apart from outVar and streamVar) from v0
    -- and pass along/make public here (e.g. vecmapType)
    -- get node to import env from
    inEnv <- (do
      publicEnvs <- gets genObjMembers ;
      return $ (lookupNode ("loop template: can't find env for input node " ++ (show v0)) (fst $ treeLabel v0) publicEnvs) `DM.difference` varsToExclude)
    mapM (\(vid, val) -> setVal out vid val) $ DM.toList inEnv
    --runGenV "declareVar"
    -- create output var if doesn't already exist
    --         decName publicName localName type
    --ifnVar "decOut" outVarName (Lf "") t3
    -- when gen is called, generate assignment
    setFun out "gen" nt (\_ -> do
      output "main" $
        "// begin <tem>\n"++
        --"<decPredV>\n<appPred0>\n<decBuffs>\n<decOut>\nif (<predV>) {\n<appNext0>\n} else {\n<copyBuff2>\n}\n"++
        "<decPredV><appPred0>\n"++
        "<decBuffs>\n<decOut>\n"++
        "if (<predV>) {\n"++
        "  <appNext0>\n"++
        "  <appPred1>\n"++
        "  while (<predV>) {\n"++
        "    <appNext1>\n"++
        "    <appPred2>\n"++
        "    if (!<predV>) {\n"++
        "      <copyBuff>\n"++
        "      break;\n"++
        "    }\n"++
        "    <appNext2>\n"++
        "    <appPred1>\n"++
        "  }\n"++
        "}\n"++
        "// end <tem>\n"
      return ())
-- Any other type/node shape is a template error.
t01 t n = terr' t n
| flocc-net/flocc | v0.1/Compiler/Back/ControlTemplates.hs | apache-2.0 | 4,775 | 0 | 31 | 1,228 | 1,095 | 562 | 533 | 75 | 1 |
{- |
Module : Bio.Motions.Callback.Class
Description : Contains the definitions of various 'Callback'-related primitives.
License : Apache
Stability : experimental
Portability : unportable
-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE DefaultSignatures #-}
{-# LANGUAGE FunctionalDependencies #-}
module Bio.Motions.Callback.Class where
import Bio.Motions.Types
import Bio.Motions.Representation.Class
import Bio.Motions.Callback.Serialisation
import Data.Proxy
import Control.DeepSeq
-- Pre-move and post-move callback results, kept separately.
type Callbacks = ([CallbackResult 'Pre], [CallbackResult 'Post])
-- |Represents the mode of a callback
data Mode = Pre -- ^Such a callback will be fired before a move is made
          | Post -- ^Such a callback will be fired after a move is made
-- |Represents a callback
-- |Represents a callback
class (Show cb, CallbackSerialisable cb, NFData cb) => Callback (mode :: Mode) cb | cb -> mode where
    -- |A human-readable name of the callback.
    callbackName :: proxy cb -> String

    -- |Computes the callback's result from scratch.
    runCallback :: (Monad m, CallbackRepresentation m repr)
        => repr
        -- ^The representation.
        -> m cb
        -- ^The computed value.

    -- |Computes the callback's result after a move.
    updateCallback :: (Monad m, CallbackRepresentation m repr)
        => repr
        -- ^The representation before/after the move. See 'Mode'.
        -> cb
        -- ^The previous value.
        -> Move
        -- ^A move that is about to be/was made. See 'Mode'.
        -> m cb
        -- ^The new value.

    -- Default: recompute from scratch, ignoring the old value and the move.
    -- Only valid for 'Post callbacks (the mode ~ 'Post constraint enforces this).
    default updateCallback :: (Monad m, CallbackRepresentation m repr, mode ~ 'Post)
        => repr -> cb -> Move -> m cb
    updateCallback repr _ _ = runCallback repr
    {-# INLINEABLE updateCallback #-}
-- |An existential wrapper around a 'Callback''s result.
-- The field is strict, so results are forced when wrapped.
data CallbackResult mode where
    CallbackResult :: (Callback mode cb) => !cb -> CallbackResult mode

instance NFData (CallbackResult mode) where
    rnf (CallbackResult cb) = rnf cb

-- |An existential wrapper around a 'Callback''s type.
data CallbackType mode where
    CallbackType :: Callback mode cb => Proxy cb -> CallbackType mode

-- Recovers the human-readable name from any callback value via its type.
getCallbackName :: forall cb mode. Callback mode cb => cb -> String
getCallbackName _ = callbackName (Proxy :: Proxy cb)
-- |Runs a 'Callback' in a monad and returns the result.
getCallbackResult :: forall m repr mode. (Monad m, CallbackRepresentation m repr) =>
    repr -> CallbackType mode -> m (CallbackResult mode)
getCallbackResult repr (CallbackType (_ :: Proxy cb)) = CallbackResult <$> (runCallback repr :: m cb)
{-# INLINEABLE getCallbackResult #-}

-- |Runs all 'Callback's in a list and returns the list of results.
getCallbackResults :: (Traversable t, Monad m, CallbackRepresentation m repr) =>
    repr -> t (CallbackType mode) -> m (t (CallbackResult mode))
getCallbackResults = traverse . getCallbackResult
{-# INLINEABLE getCallbackResults #-}

-- |Updates a 'Callback''s result in a monad after a move.
updateCallbackResult :: (Monad m, CallbackRepresentation m repr) =>
    repr -> Move -> CallbackResult mode -> m (CallbackResult mode)
updateCallbackResult repr move (CallbackResult cb) = CallbackResult <$> updateCallback repr cb move
{-# INLINEABLE updateCallbackResult #-}
-- |An alias for a particularly important class of callbacks, viz. score functions.
-- TODO: better serializability constraint
-- TODO: remove Integral
type Score cb = (Callback 'Pre cb, Num cb, Ord cb, Integral cb, Show cb)
| Motions/motions | src/Bio/Motions/Callback/Class.hs | apache-2.0 | 3,693 | 0 | 12 | 718 | 707 | 390 | 317 | 52 | 1 |
module Data.Real.Base (Base, approxBase, powers, sumBase) where
import Data.Real.Gauge
import Data.Ratio
import Data.Bits
import Test.QuickCheck
import GHC.Real
type Base = Rational
-- | Round @x@ to a dyadic rational (denominator a power of two) within @e@
-- of @x@ — see 'prop_approxBase'. The exponent @err@ is chosen from the bit
-- lengths of @e@'s numerator and denominator; its sign decides whether the
-- result needs a fractional part (positive) or is a shifted integer.
approxBase :: Base -> Gauge -> Base
approxBase x e | 0 < err = (round $ x*(fromIntegral ((1::Integer) `shiftL` (err)))) %
                           (1 `shiftL` (err))
               | otherwise = fromInteger $
                  (round $ x/(fromIntegral ((1::Integer) `shiftL` (-err)))) `shiftL` (-err)
 where
  (n,d) = (numerator e, denominator e)
  err = (bitLength (d-1))-(bitLength n)
-- QuickCheck: the approximation stays within the (absolute) gauge of @x@.
prop_approxBase x e = e /= 0 ==> abs (x - y) <= (abs e)
 where
  y = approxBase x (abs e)
-- | Infinite list of powers @[x^0, x^1, ...]@. Uses GHC.Real's raw ':%'
-- constructor to pair numerator/denominator powers directly, skipping the
-- normalisation that '%' would perform on every element.
powers :: Base -> [Base]
powers x = zipWith (:%) (iterate (*numerator x) 1)
                        (iterate (*denominator x) 1)
-- Index of the most significant set bit of |x'| (-1 for 0); i.e. the l with
-- 2^l <= |x'| < 2^(l+1) — see 'prop_bitLength'. First grows an upper bound by
-- doubling, then binary-searches ('bitL') for the exact bit position.
bitLength x' | x == 0 = -1
             | otherwise = bitL 0 $ until (\y -> x < (1 `shiftL` y)) (2*) 1
 where
  x = abs x'
  bitL low high | good low = low
                | good high = high
                | upper mid = bitL low mid
                | lower mid = bitL mid high
   where
    mid = (high+low) `div` 2
    lower y = (1 `shiftL` y) <= x
    upper y = x < (1 `shiftL` (y+1))
    good y = lower y && upper y
-- QuickCheck: 'bitLength' brackets x between consecutive powers of two.
prop_bitLength :: Integer -> Bool
prop_bitLength x' = (1 `shiftL` l) <= x && x < (1 `shiftL` (l+1))
 where
  l = bitLength x
  x = (abs x')+1

-- QuickCheck: the "round up" variant used by 'approxBase' (bitLength (x-1) + 1).
prop_bitLength2 :: Integer -> Bool
prop_bitLength2 x' = (1 `shiftL` (l-1)) < x && x <= (1 `shiftL` l)
 where
  l = (bitLength (x-1))+1
  x = (abs x')+1
-- | Sum rationals over a single common denominator: computes the lcm of all
-- denominators once (by divide-and-conquer in lcm'), rescales each numerator,
-- and performs one final '%' normalisation instead of one per addition.
sumBase :: [Base] -> Base
sumBase [] = 0
sumBase l = (sum [(lcd `div` d)*n |(n,d)<-l'])%lcd
 where
  l' = map (\x -> (numerator x,denominator x)) l
  lcd = lcm' l'
  -- lcm' is only called with non-empty input (the [] case of sumBase returns early)
  lcm' [] = undefined
  lcm' [(_,a)] = a
  lcm' x = lcm (lcm' a) (lcm' b)
   where
    (a,b) = splitAt ((length x) `div`2) x
| robbertkrebbers/fewdigits | Data/Real/Base.hs | bsd-2-clause | 1,794 | 0 | 15 | 515 | 965 | 522 | 443 | -1 | -1 |
module BruijnEnvironment where
import qualified Data.IntMap as IM
import Control.Exception.Base
import Data.Maybe
import Data.List
-- add some test
-- TODO remove b prefix and give beter names
-- TODO consistend inuative order in BruijnEnv
-- defs / bfromlist is [2,1,0]
-- Reorder ?
-- | Bound is wrapper arround Int and is used to represent BruijnIndex.
-- BruijnIndex rever to a env, but are dependent on the surrounding terms.
-- If you add extra lambda:
--
-- * \\a.a ==> \\a.\\b.a
--
-- * \\0 ==> \\1
--
-- You have to modify the index.
newtype Bound = Bound Int deriving (Eq, Show, Ord)
--TODO replace with list
--TODO Fix name to BruijnEnv
-- Environment indexed by de Bruijn indices: 'bruijnDepth' counts binders in
-- scope; 'bruijnMap' stores values keyed by depth-at-insertion, so index i
-- maps to key (depth - i - 1). Keys may be missing (blackhole slots).
data BruijnEnv a = BruijnState
    { bruijnDepth :: Int
    , bruijnMap :: IM.IntMap a
    } deriving Eq
-- TODO maybe import to debug module
-- Renders entries from index 0 upward, "_" for blackhole slots,
-- e.g. [v0,v1,_<3].
instance Show a => Show (BruijnEnv a) where
   show env = '[' :
        intercalate "," ( map showindex $ Bound <$> fromToZero (depth - 1))
        ++ "<" ++ show depth ++ "]"
     where
       depth = bruijnDepth env
       showindex i = fromMaybe "_" $ show <$> bMaybeLookup i env
-- | Count down from @n@ to 0; empty for negative @n@.
fromToZero :: Int -> [Int]
fromToZero n = [n, n - 1 .. 0]
-- | Unwrap a de Bruijn index.
toInt :: Bound -> Int
toInt (Bound i) = i
-- | True when no binders are in scope.
bNull :: BruijnEnv a -> Bool
bNull BruijnState {bruijnDepth = 0} = True
bNull _ = False
-- | The empty environment.
bEmtyEnv :: BruijnEnv a
bEmtyEnv = BruijnState
    { bruijnDepth = 0
    , bruijnMap = IM.empty
    }
-- | True when the index is bound to a value (blackhole slots are not members).
bMember :: Bound -> BruijnEnv a -> Bool
bMember b e = isJust $ bMaybeLookup b e
-- | Partial lookup: errors (via IM.!) on out-of-range or blackhole indices.
bLookup :: Bound -> BruijnEnv a -> a
bLookup (Bound i) BruijnState {bruijnDepth = depth, bruijnMap = m} =
    m IM.! (depth - i - 1)
-- | Total variant of 'bLookup'.
bMaybeLookup :: Bound -> BruijnEnv a -> Maybe a
bMaybeLookup (Bound i) BruijnState {bruijnDepth = depth, bruijnMap = m} =
    IM.lookup (depth - i - 1) m
-- | Find the given index or, failing that, the nearest *older* binding
-- (larger de Bruijn index), returning it with its index.
bLookupLT :: Bound -> BruijnEnv a -> Maybe (Bound, a)
bLookupLT (Bound i) BruijnState {bruijnDepth = depth, bruijnMap = m} =
  case IM.lookupLE (depth -i -1) m of
    Just (ik,a ) -> Just (Bound (depth-1-ik),a)
    Nothing -> Nothing
-- | Push one binding; the new value becomes index 0 and all existing
-- indices shift up by one (implicitly, via the depth bump).
bInsert :: a -> BruijnEnv a -> BruijnEnv a
bInsert a b@BruijnState {bruijnDepth = depth, bruijnMap = m} =
    b {bruijnDepth = depth + 1, bruijnMap = IM.insert depth a m }
-- TODO test (bInserts [a] == bInsert a)
-- when env = bInserts [1,2,3] bEmtyEnv then bLookup (Bound 0) will be 3;
-- i.e. the last list element becomes index 0.
bInserts :: [a] -> BruijnEnv a -> BruijnEnv a
bInserts list env = foldl' (flip bInsert) env list
-- | Reserve @n@ unbound (blackhole) slots: depth grows, no values stored.
bInsertBlackhole :: Int -> BruijnEnv a -> BruijnEnv a
bInsertBlackhole n env =
  assert (n >= 0) $
  env{bruijnDepth = bruijnDepth env +n}
-- TODO can remove duplicate code by using bInserts
bFromList :: [a] -> BruijnEnv a
bFromList = foldl' (flip bInsert) bEmtyEnv
-- TODO remove this
-- Raw (internal-key, value) pairs; keys are insertion depths, not indices.
bToList :: BruijnEnv a -> [(Int, a)]
bToList BruijnState {bruijnMap = m} = IM.toList m
-- | Overwrite the value at an index (asserts the index is in range).
bReplace :: Bound -> a -> BruijnEnv a -> BruijnEnv a
bReplace (Bound i) a b@BruijnState {bruijnDepth = depth, bruijnMap = m} =
  assert (i < depth) $
  b {bruijnMap = IM.insert (depth - i - 1 ) a m}
-- | Drop the value at an index, leaving a blackhole slot (depth unchanged).
bDelete :: Bound -> BruijnEnv a -> BruijnEnv a
bDelete (Bound i) b@BruijnState {bruijnDepth = depth, bruijnMap = m} =
  b {bruijnMap = IM.delete(depth - i - 1 ) m}
-- TODO ??? could remove duplicate code by using bSplitAt
-- | Pop the @n@ most recent bindings (indices 0..n-1); their values are
-- discarded via the IntMap split at the new depth.
bDrop :: Int -> BruijnEnv a -> BruijnEnv a
bDrop n b = assert (n >= 0 && n <= bruijnDepth b) b {bruijnDepth = newDepth, bruijnMap = newM}
  where (newM, _) = IM.split newDepth (bruijnMap b)
        newDepth = bruijnDepth b - n
-- | Grow the environment by @n@ anonymous (blackhole) slots.
-- Implemented directly rather than as @bDrop (-n)@: 'bDrop' asserts its
-- argument is non-negative, so the old definition failed for any @n > 0@
-- whenever assertions were enabled (they are off only under
-- -fignore-asserts / optimised builds).
bExtend :: Int -> BruijnEnv a -> BruijnEnv a
bExtend n env = assert (n >= 0) $ env { bruijnDepth = bruijnDepth env + n }
-- | Keep only values satisfying the predicate; removed entries become
-- blackhole slots (depth is unchanged).
bFilter :: (a -> Bool) -> BruijnEnv a -> BruijnEnv a
bFilter f env = env {bruijnMap = IM.filter f $ bruijnMap env}
-- | Number of *stored* values; can be smaller than 'bruijnDepth'.
bSize :: BruijnEnv a -> Int
bSize = IM.size . bruijnMap
-- | Split off the @n@ most recent bindings: returns the shrunken environment
-- and the removed values (oldest first, blackhole slots omitted).
bSplitAt :: Int -> BruijnEnv a -> (BruijnEnv a, [a])
bSplitAt n b = (b {bruijnDepth = newDepth, bruijnMap = low}, maybeToList pivot ++ map snd (IM.toAscList high))
  where (low, pivot, high) = IM.splitLookup newDepth (bruijnMap b)
        newDepth = bruijnDepth b - n
-- TODO test (bInsertAt 0 == bInsert)
-- TODO maybe use Bound
-- | Insert a value so it ends up at index @n@, re-pushing the newer bindings.
bInsertAt :: Int -> a -> BruijnEnv a -> BruijnEnv a
bInsertAt n a env = bInserts (a:right) left
  where
    (left,right) = bSplitAt n env
instance Functor BruijnEnv where
    fmap f b = b {bruijnMap = fmap f (bruijnMap b)}
-- | Map with access to each entry's de Bruijn index (computed strictly
-- from the internal key).
mapWithBound :: (Bound -> a -> b) -> BruijnEnv a -> BruijnEnv b
mapWithBound f b@BruijnState {bruijnDepth = dept, bruijnMap = m} =
    b {bruijnMap = IM.mapWithKey (\ index a -> f (Bound $! dept - index - 1) a) m}
-- | Rewrite indices @n, n+1, ...@ to hold the values currently at the given
-- source indices (all reads come from the original @env@, so order-independent).
bReorder :: BruijnEnv a -> Int -> [Bound] -> BruijnEnv a
bReorder env n order = foldl go env $ zip order [n ..]
  where go envN (bi , j) = bReplace (Bound j) (bLookup bi env) envN
| kwibus/myLang | src/BruijnEnvironment.hs | bsd-3-clause | 4,705 | 0 | 15 | 1,051 | 1,720 | 907 | 813 | 86 | 2 |
{-# LANGUAGE CPP #-}
#if __GLASGOW_HASKELL__ >= 701
{-# LANGUAGE Trustworthy #-}
#endif
--
-- |
-- Module : Data.String.UTF8
-- Copyright : (c) Iavor S. Diatchki 2009
-- License : BSD3-style (see LICENSE)
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
{-# LANGUAGE MultiParamTypeClasses, UndecidableInstances #-}
module Data.String.UTF8
( -- * Representation
UTF8
, UTF8Bytes()
, fromString
, toString
, fromRep
, toRep
, G.replacement_char
-- * Character based operations
, uncons
, splitAt
, take
, drop
, span
, break
, foldl
, foldr
, length
, lines
, lines'
-- * Representation based operations
, null
, decode
, byteSplitAt
, byteTake
, byteDrop
) where
import Prelude hiding (null,take,drop,span,break
,foldl,foldr,length,lines,splitAt)
import qualified Codec.Binary.UTF8.Generic as G
import Codec.Binary.UTF8.Generic (UTF8Bytes)
-- | The type of strings that are represented using the UTF8 encoding.
-- The parameter is the type of the container for the representation.
-- | The type of strings that are represented using the UTF8 encoding.
-- The parameter is the type of the container for the representation.
-- NOTE(review): derived Eq/Ord compare raw bytes, not decoded characters —
-- equivalent for valid UTF-8, but invalid sequences compare byte-wise.
newtype UTF8 string = Str string deriving (Eq,Ord) -- XXX: Is this OK?

-- Shows the decoded Haskell string (invalid bytes become replacement chars).
instance UTF8Bytes string index => Show (UTF8 string) where
  show x = show (toString x)
-- | Wrap a raw representation as a UTF8-encoded string (no validation).
fromRep :: string -> UTF8 string
fromRep s = Str s

-- | Unwrap the underlying representation.
toRep :: UTF8 string -> string
toRep (Str s) = s
-- | Converts a Haskell string into a UTF8 encoded string.
-- Complexity: linear.
fromString :: UTF8Bytes string index => String -> UTF8 string
fromString xs = Str (G.fromString xs)

-- | Convert a UTF8 encoded string into a Haskell string.
-- Invalid characters are replaced by 'replacement_char'.
-- Complexity: linear.
toString :: UTF8Bytes string index => UTF8 string -> String
toString (Str xs) = G.toString xs

-- | Checks if there are no more bytes in the underlying representation.
null :: UTF8Bytes string index => UTF8 string -> Bool
null (Str x) = G.null x
-- | Split after a given number of characters.
-- Negative values are treated as if they are 0.
-- See also 'bytesSplitAt'.
splitAt :: UTF8Bytes string index
        => index -> UTF8 string -> (UTF8 string, UTF8 string)
splitAt n (Str bs) =
  let (pre, post) = G.splitAt n bs
  in (Str pre, Str post)
-- | Split after a given number of bytes in the underlying representation.
-- See also 'splitAt'.
byteSplitAt :: UTF8Bytes string index
            => index -> UTF8 string -> (UTF8 string, UTF8 string)
byteSplitAt n (Str bs) =
  let (pre, post) = G.bsplit n bs
  in (Str pre, Str post)
-- | Take only the given number of bytes from the underlying representation.
-- See also 'take'.
byteTake :: UTF8Bytes string index => index -> UTF8 string -> UTF8 string
byteTake n (Str x) = Str (fst (G.bsplit n x))

-- | Drop the given number of bytes from the underlying representation.
-- See also 'drop'.
byteDrop :: UTF8Bytes string index => index -> UTF8 string -> UTF8 string
byteDrop n (Str x) = Str (G.bdrop n x)

-- | @take n s@ returns the first @n@ characters of @s@.
-- If @s@ has less than @n@ characters, then we return the whole of @s@.
take :: UTF8Bytes string index => index -> UTF8 string -> UTF8 string
take n (Str bs) = Str (G.take n bs)

-- | @drop n s@ returns the @s@ without its first @n@ characters.
-- If @s@ has less than @n@ characters, then we return an empty string.
drop :: UTF8Bytes string index => index -> UTF8 string -> UTF8 string
drop n (Str bs) = Str (G.drop n bs)
-- | Split a string into two parts: the first is the longest prefix
-- that contains only characters that satisfy the predicate; the second
-- part is the rest of the string.
-- Invalid characters are passed as '\0xFFFD' to the predicate.
span :: UTF8Bytes string index
     => (Char -> Bool) -> UTF8 string -> (UTF8 string, UTF8 string)
span p (Str bs) =
  let (pre, post) = G.span p bs
  in (Str pre, Str post)
-- | Split a string into two parts: the first is the longest prefix
-- that contains only characters that do not satisfy the predicate; the second
-- part is the rest of the string.
-- Invalid characters are passed as 'replacement_char' to the predicate.
break :: UTF8Bytes string index
      => (Char -> Bool) -> UTF8 string -> (UTF8 string, UTF8 string)
break p (Str bs) =
  let (pre, post) = G.break p bs
  in (Str pre, Str post)
-- | Get the first character of a byte string, if any.
-- Invalid characters are replaced by 'replacement_char'.
uncons :: UTF8Bytes string index
       => UTF8 string -> Maybe (Char, UTF8 string)
uncons (Str bs) = fmap (\(c, rest) -> (c, Str rest)) (G.uncons bs)
-- | Extract the first character for the underlying representation,
-- if one is available. It also returns the number of bytes used
-- in the representation of the character.
-- See also 'uncons', 'dropBytes'.
decode :: UTF8Bytes string index => UTF8 string -> Maybe (Char, index)
decode (Str x) = G.decode x

-- | Traverse a bytestring (right biased).
foldr :: UTF8Bytes string index => (Char -> a -> a) -> a -> UTF8 string -> a
foldr cons nil (Str cs) = G.foldr cons nil cs

-- | Traverse a bytestring (left biased).
-- This function is strict in the accumulator.
foldl :: UTF8Bytes string index => (a -> Char -> a) -> a -> UTF8 string -> a
foldl add acc (Str cs) = G.foldl add acc cs

-- | Counts the number of characters encoded in the bytestring.
-- Note that this includes replacement characters.
-- The function is linear in the number of bytes in the representation.
length :: UTF8Bytes string index => UTF8 string -> index
length (Str b) = G.length b
-- | Split a string into a list of lines.
-- Lines are terminated by '\n' or the end of the string.
-- Empty lines may not be terminated by the end of the string.
-- See also 'lines\''.
lines :: UTF8Bytes string index => UTF8 string -> [UTF8 string]
lines (Str b) = map Str (G.lines b) -- XXX: unnecessary map

-- | Split a string into a list of lines.
-- Lines are terminated by '\n' or the end of the string.
-- Empty lines may not be terminated by the end of the string.
-- This function preserves the terminators.
-- See also 'lines'.
lines' :: UTF8Bytes string index => UTF8 string -> [UTF8 string]
lines' (Str x) = map Str (G.lines' x) -- XXX: unnecessary map
| ghc/packages-utf8-string | Data/String/UTF8.hs | bsd-3-clause | 6,277 | 0 | 10 | 1,376 | 1,446 | 771 | 675 | 87 | 1 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
module Scripts.Opts
( setTimeout
, module Scripts.Opts
) where
import ClassyPrelude
import Options.Applicative
import Options.Applicative.Types
import Scripts.FCCForm471
import Scripts.FCCForm471ReviewAssignment
import Scripts.FCCForm471Certification
import Scripts.FCCForm471Common (Form471Num)
import Scripts.CreateCSCase
import Scripts.FCCForm486
import Scripts.SPINChangeIntake
import Scripts.InitialReview
import Scripts.Assignment
import Scripts.ReviewCommon
import Scripts.AdminReview
import Scripts.AdminIntake
import Scripts.ComadReview
import Appian.Client ( runAppianT, cookieModifier, runAppianT'
, LogMode (..), HostUrl (..), ScriptError (..), Appian, _BadUpdateError
)
import Appian.Instances
import Appian.Types (AppianUsername (..))
import Servant.Client
import Network.HTTP.Client (newManager)
import Network.HTTP.Client.TLS (tlsManagerSettings)
import Control.Arrow
import qualified Streaming.Prelude as S
import Control.Monad.Trans.Resource hiding (throwM)
import Data.Aeson (Value)
import Control.Lens
import Network.HTTP.Client
import Appian
import Stats.CsvStream
import Control.Monad.Logger
import Scripts.Common
import Control.Monad.Trans.Resource (runResourceT)
import Control.Arrow ((>>>))
import qualified Data.Csv as Csv
import Scripts.ProducerConsumer
import Control.Retry
import Control.Monad.Except (catchError, throwError)
import Scripts.Noise
import Test.QuickCheck
import Scripts.ExpressionTest
import Util.Parallel (LogFilePath, logFilePath, runParallelFileLoggingT)
import Scripts.Execute
import Scripts.Parseable
import Scripts.ViewFCCForm470
import Scripts.ViewFCCForm471
import Scripts.ComadIntake
import Scripts.FCCForm500Intake
import Scripts.DisplayInvoiceDetails
import Scripts.ServiceSubstitutionIntake
import Scripts.FCCForm471EditApplication
import Development.GitRev
-- | Password used for all script logins.
-- NOTE(review): this is a hard-coded credential checked into source
-- control — presumably acceptable for a load-test environment, but
-- confirm it is not a production secret.
getPassword :: IO String
getPassword = pure "EPCPassword123!"
-- | Execute @script@ once for every combination of user count and run
-- number: each entry of @nUserList@ is run @nRuns@ times, and every run
-- logs to its own file named @<prefix>_<nUsers>_<run>.csv@.
runMultiple :: (LogMode -> Int -> IO ()) -> LogFilePath -> Int -> [Int] -> IO ()
runMultiple script logFilePrefix nRuns nUserList =
  forM_ nUserList $ \nUsers ->
    forM_ [1 .. nRuns] $ \run ->
      let suffix = "_" <> show nUsers <> "_" <> show run <> ".csv"
      in script (LogFile (logFilePrefix <> logFilePath suffix)) nUsers
-- | Administrative-correction (appeal) intake runner.
-- NOTE(review): deliberately calls 'error' — the underlying intake is
-- known broken; any caller will crash at runtime.  The previous
-- implementation is kept below for reference.
runAdminIntake :: FilePath -> Int -> BaseUrl -> LogMode -> IO ()
runAdminIntake = error "Appeal intake has been broken!"
-- runAdminIntake userFile nThreads baseUrl fp = runConsumer action baseUrl fp userFile nThreads
--   where
--     action login = adminIntake $ login ^. username . to AppianUsername
-- | Thrown when a review configuration names a review flow that the
-- scripts cannot handle; the 'Text' carries the explanation.
newtype UnsupportedReviewException = UnsupportedReviewException Text
instance Show UnsupportedReviewException where
  show (UnsupportedReviewException msg) = "UnsupportedReviewException: " <> unpack msg
instance Exception UnsupportedReviewException
-- | Drain @chan@, running @f@ on each 'Item' until a 'Finished' marker
-- is seen; results are discarded.
loginConsumer :: MonadIO m => TChan (ThreadControl a) -> (a -> m ()) -> m ()
loginConsumer chan f = S.mapM_ f $ S.map tcItem $ S.takeWhile notFinished $ S.repeatM (atomically $ readTChan chan)
-- | Like 'loginConsumer' but collects the results of @f@ into a list.
loginConsumer' :: MonadIO m => TChan (ThreadControl a) -> (a -> m b) -> m (S.Of [b] ())
loginConsumer' chan f = S.toList $ S.mapM f $ S.map tcItem $ S.takeWhile notFinished $ S.repeatM (atomically $ readTChan chan)
-- | Stream CSV records from @fp@ into @chan@ (wrapped as 'Item'), then
-- write a single 'Finished' marker so consumers know to stop.
loginProducer :: (MonadIO m, MonadBaseControl IO m, MonadThrow m, Csv.FromNamedRecord a) => CsvPath -> TChan (ThreadControl a) -> m ()
loginProducer fp chan = do
  csvStreamByName >>> S.map Item >>> S.mapM_ (atomically . writeTChan chan) >>> runResourceT >>> runNoLoggingT $ fp
  atomically $ writeTChan chan Finished
-- | Thrown by consumers that received no work item.
data MissingItemException = MissingItemException
instance Show MissingItemException where
  show _ = "There was nothing provided to this consumer!"
instance Exception MissingItemException
-- | Generic script failure carrying a human-readable message.
newtype ScriptException = ScriptException Text
instance Show ScriptException where
  show (ScriptException msg) = unpack msg
instance Exception ScriptException
-- Thin entry points: each pairs a script with the shared 'runIt' /
-- 'runScriptExhaustive' drivers (bounds, host, logging, CSV input,
-- rampup, thread count).

-- | FCC Form 471 intake load script.
run471Intake :: Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError Form471Num))]
run471Intake = runIt form471Intake
-- | Form 471 intake immediately followed by certification.
run471IntakeAndCertify :: Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError Form471Num))]
run471IntakeAndCertify = runIt form471IntakeAndCertify
-- | FCC Form 486 intake load script.
run486Intake :: Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError (Maybe Text)))]
run486Intake = runIt form486Intake
-- | Initial review, parameterised by the review configuration.
runInitialReview :: ReviewBaseConf -> Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError Value))]
runInitialReview conf = runIt (initialReview conf)
-- | Review assignment; note this one uses the exhaustive driver.
runReviewAssign :: ReviewBaseConf -> Bounds -> HostUrl -> LogMode -> CsvPath -> NThreads -> NumRecords -> IO ()
runReviewAssign conf = runScriptExhaustive (assignment conf)
-- | Background "noise" traffic generator.
runNoise :: Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError ()))]
runNoise = runIt noise
-- | Form 471 review load script.
run471Review :: Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError Value))]
run471Review = runIt form471Review
-- | Retry predicate for certification: retry only when the failure is a
-- 'BadUpdateError' whose message contains "Cannot find task for " —
-- i.e. the certification task has not yet appeared.  All other errors,
-- and all successes, stop the retry loop.
shouldRetry :: Monad m => RetryStatus -> Either ScriptError a -> m Bool
shouldRetry _ (Left err) = case err ^? _BadUpdateError . _1 of -- to fromException . traverse . badUpdateExceptionMsg of
  Nothing -> pure False
  Just txt -> pure $ isInfixOf "Cannot find task for " txt
shouldRetry _ (Right _) = pure False
-- | Exponential backoff starting at 1s (1000000 µs), capped at 10 retries.
findTaskRetryPolicy :: Monad m => RetryPolicyM m
findTaskRetryPolicy = exponentialBackoff 1000000 `mappend` limitRetries 10
-- | Run the FCC Form 471 intake flow and then certify the resulting
-- application as the same applicant.  Certification is retried (see
-- 'findTaskRetryPolicy' / 'shouldRetry') because the certification task
-- may not be available immediately after intake; if it still fails
-- after all retries the error is re-thrown.
form471IntakeAndCertify :: Form471Conf -> Appian Form471Num
form471IntakeAndCertify conf = do
  formNum <- form471Intake conf
  -- Delegate to the shared retrying-certification helper instead of
  -- duplicating its retry/catch logic inline (it was copied verbatim
  -- from 'certify471Retrying' before).
  certify471Retrying (CertConf formNum (conf ^. applicant))
-- | Certify a Form 471, retrying with exponential backoff while the
-- certification task is not yet available (see 'shouldRetry').  The
-- script error is caught into an 'Either' so 'retrying' can inspect it;
-- after the final attempt any remaining error is re-thrown.
certify471Retrying :: CertConf -> Appian Form471Num
certify471Retrying certConf = retrying findTaskRetryPolicy shouldRetry (const $ certify `catchError` certifyCatch) >>= either throwError pure
  where
    certify = do
      res <- form471Certification certConf
      return $ Right res
    certifyCatch = pure . Left
-- | COMAD initial review, parameterised by the review configuration.
runComadInitialReview :: ReviewBaseConf -> Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError Value))]
runComadInitialReview baseConf = runIt $ comadInitialReview baseConf
-- | QuickCheck @maxSize@ wrapper used by the reverse-list test.
newtype MaxSize = MaxSize Int
  deriving (Num, Show, Eq, Ord)
-- | Run the QuickCheck list-reversing property against the server.
-- Builds its own TLS manager with a very large response timeout
-- (90000000000 µs) and the cookie-preserving response hook.
runReverseTest :: Bounds -> HostUrl -> LogMode -> Login -> MaxSize -> IO ()
runReverseTest bounds (HostUrl hostUrl) logMode login (MaxSize n) = do
  mgr <- newManager $ setTimeout (responseTimeoutMicro 90000000000) $ tlsManagerSettings { managerModifyResponse = cookieModifier }
  let env = ClientEnv mgr (BaseUrl Https hostUrl 443 mempty)
      appianState = newAppianState bounds
  quickCheckWith (stdArgs { maxSize = n } ) (prop_reverseList logMode appianState env login)
-- | Final review, parameterised by the review configuration.
runReview :: ReviewBaseConf -> Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError Value))]
runReview baseConf = runIt (finalReview baseConf)
-- runIt :: (Csv.FromNamedRecord a, Show a, HasLogin a) => (a -> Appian b) -> Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError b))]
-- runIt f bounds (HostUrl hostUrl) logMode csvInput (RampupTime delay) (NThreads n) = do
-- mgr <- newManager $ setTimeout (responseTimeoutMicro 90000000000) $ tlsManagerSettings { managerModifyResponse = cookieModifier }
-- let env = ClientEnv mgr (BaseUrl Https hostUrl 443 mempty)
-- appianState = newAppianState bounds
-- runResourceT $ runStdoutLoggingT $ runParallel $ Parallel (nThreads n) (S.zip (S.each [0..]) $ void (csvStreamByName csvInput)) (\(i, a) -> do
-- let d = (i * (delay `div` n))
-- threadDelay $ trace (show d) d
-- res <- liftIO $ runAppianT logMode (f a) appianState env (getLogin a)
-- logResult res
-- return res
-- )
-- | Dispatch a logging action to the sink selected by 'LogMode':
-- stdout, or a per-run log file.  (Local binder renamed so it no longer
-- shadows the imported 'logFilePath'.)
runLogger :: (MonadBaseControl IO m, MonadIO m, Forall (Pure m), MonadThrow m) => LogMode -> LoggingT m a -> m a
runLogger LogStdout = runStdoutLoggingT
runLogger (LogFile fp) = runParallelFileLoggingT fp
-- runSPINIntake :: Bounds -> HostUrl -> LogMode -> CsvPath -> RampupTime -> NThreads -> IO [Maybe (Either ServantError (Either ScriptError (Maybe Text)))]
-- runSPINIntake = runIt spinChangeIntake
-- | Print a run summary: every transport (servant) error, every script
-- error, then counts of successes and both error classes.
dispResults :: [Either ServantError (Either ScriptError a)] -> IO ()
dispResults results = do
  let scriptErrs = results ^.. traverse . _Right . _Left
      transportErrs = results ^.. traverse . _Left
      oks = results ^.. traverse . _Right . _Right
  mapM_ print transportErrs
  mapM_ print scriptErrs
  putStrLn $ "Successfully executed: " <> tshow (length oks)
  putStrLn $ "Script Errors: " <> tshow (length scriptErrs)
  putStrLn $ "Server Errors: " <> tshow (length transportErrs)
-- | Top-level CLI: all sub-commands plus the program description.
commandsInfo :: ParserInfo (IO ())
commandsInfo = info (helper <*> parseCommands)
  ( fullDesc
  <> progDesc progInfo
  )

-- | Program description shown by @--help@; embeds the git commit hash
-- and commit date at compile time via Template Haskell.
-- (Type signature added — it was previously inferred.)
progInfo :: String
progInfo = "Various scripts written for the EPC system.\n"
  <> $(gitHash) <> "\n("
  <> $(gitCommitDate) <> ")\n"
-- | Sub-command dispatch table; each command maps to a 'ParserInfo'
-- defined below.
parseCommands :: Parser (IO ())
parseCommands = subparser
  ( command "form471Intake" form471IntakeInfo
  <> command "comadInitial" comadInitialInfo
  <> command "form471IntakeAndCertify" form471IntakeAndCertifyInfo
  <> command "form486Intake" form486IntakeInfo
  <> command "initialReview" initialReviewInfo
  <> command "pcAssign" reviewAssignInfo
  <> command "form471Review" form471ReviewInfo
  <> command "noise" noiseInfo
  <> command "reverseTest" reverseTestInfo
  <> command "createCSCase" createCSCaseInfo
  <> command "viewFCCForm470" viewFCCForm470Info
  <> command "viewFCCForm471" viewFCCForm471Info
  <> command "COMADIntake" comadIntakeInfo
  <> command "form500Intake" form500IntakeInfo
  <> command "displayInvoiceDetails" displayInvoiceDetailsInfo
  <> command "serviceSubstitutionIntake" serviceSubstitutionIntakeInfo
  <> command "edit471App" edit471ApplicationInfo
  <> command "form471Certification" form471CertifyInfo
  <> command "spinChangeIntake" spinChangeIntakeInfo
  )
-- | Build a 'BaseUrl' from @-s/--secure@, @-n/--host-name@ and
-- @-p/--port@; the trailing path component is always empty.
urlParser :: Parser BaseUrl
urlParser = BaseUrl
  <$> flag Http Https
  ( long "secure"
  <> short 's'
  <> help "If set uses https."
  )
  <*> strOption
  ( long "host-name"
  <> short 'n'
  <> help "The hostname of the server to use."
  )
  <*> option auto
  ( long "port"
  <> short 'p'
  <> help "The port to use for connecting to the remote host."
  )
  <*> pure ""
-- hostUrlParser :: Parser HostUrl
-- hostUrlParser = HostUrl
-- <$> strOption
-- ( long "host-name"
-- <> short 'n'
-- <> help "The hostname of the server to use."
-- )
-- | CLI info for the Form 471 intake load script.
form471IntakeInfo :: ParserInfo (IO ())
form471IntakeInfo = info (helper <*> form471Parser)
  ( fullDesc
  <> progDesc "Runs the FCC Form 471 intake performance script"
  )
-- | Standard option set: bounds, host, log mode, CSV input, rampup,
-- thread count.
form471Parser :: Parser (IO ())
form471Parser = fmap void $ run471Intake
  <$> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- | CLI info for intake immediately followed by certification.
form471IntakeAndCertifyInfo :: ParserInfo (IO ())
form471IntakeAndCertifyInfo = info (helper <*> form471IntakeAndCertifyParser)
  ( fullDesc
  <> progDesc "Runs the 471 Intake Script followed by the 471 Certification script."
  )
form471IntakeAndCertifyParser :: Parser (IO ())
form471IntakeAndCertifyParser = fmap void $ run471IntakeAndCertify
  <$> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- | CLI info for the Form 486 intake load script.
form486IntakeInfo :: ParserInfo (IO ())
form486IntakeInfo = info (helper <*> form486IntakeParser)
  ( fullDesc
  <> progDesc "Runs the FCC Form 486 Intake script"
  )
form486IntakeParser :: Parser (IO ())
form486IntakeParser = fmap void $ run486Intake
  <$> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- | CLI info for the initial review script (review config supplied via
-- 'reviewBaseConfParser').
initialReviewInfo :: ParserInfo (IO ())
initialReviewInfo = info (helper <*> initialReviewParser)
  ( fullDesc
  <> progDesc "Runs the 2017 SPIN Change Initial Review script"
  )
initialReviewParser :: Parser (IO ())
initialReviewParser = fmap void $ runInitialReview
  <$> reviewBaseConfParser
  <*> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- | CLI info for the Form 471 review script.
form471ReviewInfo :: ParserInfo (IO ())
form471ReviewInfo = info (helper <*> form471ReviewParser)
  ( fullDesc
  <> progDesc "Runs the Form 471 Review script"
  )
form471ReviewParser :: Parser (IO ())
form471ReviewParser = fmap void $ run471Review
  <$> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- | CLI info for the background-noise traffic script.
noiseInfo :: ParserInfo (IO ())
noiseInfo = info (helper <*> noiseParser)
  ( fullDesc
  <> progDesc "Runs the noise script"
  )
noiseParser :: Parser (IO ())
noiseParser = fmap void $ runNoise
  <$> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- | CLI info for review assignment (the @pcAssign@ command).
-- NOTE(review): this progDesc is identical to 'initialReviewInfo's and
-- looks copy-pasted — presumably it should describe assignment; confirm
-- before changing the user-facing text.
reviewAssignInfo :: ParserInfo (IO ())
reviewAssignInfo = info (helper <*> reviewAssignParser)
  ( fullDesc
  <> progDesc "Runs the 2017 SPIN Change Initial Review script"
  )
-- | Uses the exhaustive driver, so takes a record count instead of a
-- rampup period.
reviewAssignParser :: Parser (IO ())
reviewAssignParser = fmap void $ runReviewAssign
  <$> reviewBaseConfParser
  <*> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> nthreadParser
  <*> parseOption
-- | CLI info for the QuickCheck reverse-list test.
reverseTestInfo :: ParserInfo (IO ())
reverseTestInfo = info (helper <*> reverseTestParser)
  ( fullDesc
  <> progDesc "Runs the list reversing test script"
  )
-- The following Info blocks all follow the same pattern: a one-line
-- description plus the shared 'runItParser' option set for their script.

-- | Customer-service case creation.
createCSCaseInfo :: ParserInfo (IO ())
createCSCaseInfo = info (helper <*> createCSCaseParser)
  ( fullDesc
  <> progDesc "Runs the 'Create a Customer Service Case' script"
  )
  where
    createCSCaseParser = runItParser runCreateCsCase
-- | View FCC Form 470.
viewFCCForm470Info :: ParserInfo (IO ())
viewFCCForm470Info = info (helper <*> viewForm470Parser)
  ( fullDesc
  <> progDesc "Runs the 'View FCC Form 470' script"
  )
  where
    viewForm470Parser = runItParser runViewForm470
-- | View FCC Form 471.
viewFCCForm471Info :: ParserInfo (IO ())
viewFCCForm471Info = info (helper <*> viewForm471Parser)
  ( fullDesc
  <> progDesc "Runs the 'View FCC Form 471' script"
  )
  where
    viewForm471Parser = runItParser runViewForm471
-- | COMAD intake.
comadIntakeInfo :: ParserInfo (IO ())
comadIntakeInfo = info (helper <*> comadIntakeParser)
  ( fullDesc
  <> progDesc "Runs the 'Comad Intake' script"
  )
  where
    comadIntakeParser = runItParser runComadIntake
-- | Form 500 intake.
form500IntakeInfo :: ParserInfo (IO ())
form500IntakeInfo = info (helper <*> form500IntakeParser)
  ( fullDesc
  <> progDesc "Runs the 'Form 500 Intake' script"
  )
  where
    form500IntakeParser = runItParser runForm500Intake
-- | Display invoice details.
displayInvoiceDetailsInfo :: ParserInfo (IO ())
displayInvoiceDetailsInfo = info (helper <*> displayInvoiceDetailsParser)
  ( fullDesc
  <> progDesc "Runs the 'Display Invoice Details' script"
  )
  where
    displayInvoiceDetailsParser = runItParser runViewInvoiceDetails
-- | Service substitution intake.
serviceSubstitutionIntakeInfo :: ParserInfo (IO ())
serviceSubstitutionIntakeInfo = info (helper <*> serviceSubstitutionIntakeParser)
  ( fullDesc
  <> progDesc "Runs the 'Service Substitution Intake' script"
  )
  where
    serviceSubstitutionIntakeParser = runItParser runServiceSubstitution
-- | CLI info for editing an existing FCC Form 471 application.
-- (progDesc previously said "Service Substitution Intake" — a
-- copy-paste error from the block above.)
edit471ApplicationInfo :: ParserInfo (IO ())
edit471ApplicationInfo = info (helper <*> edit471ApplicationParser)
  ( fullDesc
  <> progDesc "Runs the 'Edit FCC Form 471 Application' script"
  )
  where
    edit471ApplicationParser = runItParser runEdit471Application
-- | CLI info for standalone Form 471 certification (with retries).
-- (progDesc previously said "Service Substitution Intake" — a
-- copy-paste error.)
form471CertifyInfo :: ParserInfo (IO ())
form471CertifyInfo = info (helper <*> form471CertifyParser)
  ( fullDesc
  <> progDesc "Runs the 'FCC Form 471 Certification' script"
  )
  where
    form471CertifyParser = runItParser $ runIt certify471Retrying
-- | Options for the QuickCheck reverse-list test: bounds, host, log
-- mode, a single login and the maximum generated list size.
reverseTestParser :: Parser (IO ())
reverseTestParser = fmap void $ runReverseTest
  <$> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> loginParser
  <*> maxSizeParser
-- | CLI info for the PC COMAD initial review script.
comadInitialInfo :: ParserInfo (IO ())
comadInitialInfo = info (helper <*> comadInitialParser)
  ( fullDesc
  <> progDesc "Runs the PC COMAD Initial Review script"
  )
-- | Review config is fixed to 'comadInitial2017'; the rest is the
-- standard option set.
comadInitialParser :: Parser (IO ())
comadInitialParser = fmap void $ runComadInitialReview
  <$> pure comadInitial2017
  <*> boundsParser
  <*> hostUrlParser
  <*> logModeParser
  <*> csvConfParser
  <*> rampupParser
  <*> nthreadParser
-- csvConfParser :: Parser CsvPath
-- csvConfParser = fromString <$>
-- strOption
-- ( long "csv-conf"
-- <> short 'i'
-- <> help "The csv config file for 471 intake."
-- )
-- spinChangeInfo :: ParserInfo (IO ())
-- spinChangeInfo = info (helper <*> spinChangeParser)
-- ( fullDesc
-- <> progDesc "Runs the SPIN Change intake script."
-- )
-- spinChangeParser :: Parser (IO ())
-- spinChangeParser = void <$> (runSPINIntake
-- <$> boundsParser
-- <*> (HostUrl <$> strOption
-- ( long "host-url"
-- <> help "The url of the host to use."
-- ))
-- <*> logModeParser
-- <*> csvConfParser
-- <*> rampupParser
-- <*> nthreadParser
-- )
-- | CLI info for SPIN change intake; uses the exhaustive driver's
-- option parser rather than the rampup-based one.
spinChangeIntakeInfo :: ParserInfo (IO ())
spinChangeIntakeInfo = info (helper <*> spinChangeIntakeParser)
  ( fullDesc
  <> progDesc "Runs the 'SPIN Change Intake' script"
  )
  where
    spinChangeIntakeParser = runScriptExhaustiveParser runSpinChangeIntake
-- nthreadParser :: Parser NThreads
-- nthreadParser = NThreads
-- <$> option auto
-- ( long "nThreads"
-- <> help "The number of concurrent threads to execute."
-- )
-- | CLI info for administrative-correction intake.
-- NOTE(review): 'runAdminIntake' currently calls 'error', so this
-- command crashes at runtime.
adminIntakeInfo :: ParserInfo (IO ())
adminIntakeInfo = info (helper <*> adminIntakeParser)
  ( fullDesc
  <> progDesc "Runs Administrative Correction intake"
  )
adminIntakeParser :: Parser (IO ())
adminIntakeParser = runAdminIntake
  <$> userParser
  <*> option auto
  ( long "num-threads"
  <> short 't'
  )
  <*> urlParser
  <*> logModeParser
-- | Read a whitespace-separated list of words from a single option
-- value (e.g. @--num-users-list "1 2 4"@).
parseMany :: ReadM [String]
parseMany = words <$> readerAsk

-- | Read a whitespace-separated list of 'Read'-able values, failing
-- with a message naming the first token that does not parse.
parseManyR :: Read a => ReadM [a]
parseManyR = parseMany >>= traverse readIt
  where
    readIt x = case readMay x of
      Just y -> pure y
      Nothing -> fail $ "Could not read " <> show x
-- logModeParser :: Parser LogMode
-- logModeParser = (
-- strOption
-- ( long "stdout"
-- <> help "Log messages to stdout."
-- )) *> pure LogStdout
-- <|>
-- LogFile <$> logFileParser
-- logFileParser :: Parser LogFilePath
-- logFileParser = logFilePath <$> strOption
-- ( long "log-file-path"
-- <> short 'l'
-- <> help "The path of the file to write the logs to."
-- )
-- | @-i/--user-csv-prefix@: prefix of the per-run user CSV files.
userParser :: Parser FilePath
userParser = strOption
  ( long "user-csv-prefix"
  <> short 'i'
  )
-- | @-u/--num-users-list@: space-separated list of user counts.
threadsParser :: Parser [Int]
threadsParser = option parseManyR
  ( long "num-users-list"
  <> short 'u'
  )
-- | Write a line of UTF-8 text to stderr.
stderrLn :: MonadIO m => Text -> m ()
stderrLn txt = hPut stderr $ encodeUtf8 txt <> "\n"
-- boundsParser :: Parser Bounds
-- boundsParser = Bounds
-- <$> option auto
-- ( long "lower"
-- <> help "The minimum for the think timer"
-- )
-- <*> option auto
-- ( long "upper"
-- <> help "The maximum for the think timer"
-- )
-- | CLI option @--fy@ selecting the funding year.
fyParser :: Parser FundingYear
fyParser = option readFy (long "fy")

-- | Reader for funding-year tokens.
readFy :: ReadM FundingYear
readFy = readerAsk >>= readFy_

-- | Map a token to its 'FundingYear'.
-- (Type signature added — it was previously inferred.)
readFy_ :: String -> ReadM FundingYear
readFy_ "fy16" = pure FY2016
readFy_ "fy17" = pure FY2017
readFy_ "fy18" = pure FY2018
readFy_ _ = fail $ "Unrecognized fy! Valid ones are: " <> "fy16, fy17, and fy18"
-- | CLI option @--review-type@.
reviewTypeParser :: Parser ReviewType
reviewTypeParser = option readReviewType (long "review-type")

-- | Reader for 'ReviewType' tokens.
readReviewType :: ReadM ReviewType
readReviewType = readerAsk >>= readReviewType_

-- | Map a token to its 'ReviewType'.  The fallthrough message now
-- matches the accepted tokens: it previously omitted @Form500@ and
-- misspelled @SRCSSPINChange@ as @SRCSPINChange@.
readReviewType_ :: String -> ReadM ReviewType
readReviewType_ "SPINChange" = pure RevSpinChange
readReviewType_ "appeal" = pure RevAppeals
readReviewType_ "Form486" = pure RevForm486
readReviewType_ "COMAD" = pure RevCOMAD
readReviewType_ "Form500" = pure RevForm500
readReviewType_ "ServSub" = pure RevServSub
readReviewType_ "AdminCorrection" = pure RevAdminCorrection
readReviewType_ "SRCSSPINChange" = pure RevSRCSpinChange
readReviewType_ "BulkSPINChange" = pure RevBulkSpinChange
readReviewType_ _ = fail $ "Unrecognized review type! Valid ones are: " <> "SPINChange, appeal, Form486, COMAD, Form500, ServSub, AdminCorrection, SRCSSPINChange, and BulkSPINChange"
-- | CLI option @--reviewer-type@.
reviewerTypeParser :: Parser ReviewerType
reviewerTypeParser = option readReviewerType (long "reviewer-type")

-- | Reader for 'ReviewerType' tokens.
readReviewerType :: ReadM ReviewerType
readReviewerType = readerAsk >>= readReviewerType_

-- | Map a token to its 'ReviewerType'.  The fallthrough message now
-- matches the accepted tokens (it said "Valit" and listed @HSInitial@,
-- which was never accepted — the token is @HSInit@).
-- NOTE(review): every accepted token maps to 'RevInitial'; presumably a
-- placeholder — confirm whether final/solix/usac/HS* should map to
-- distinct constructors.
readReviewerType_ :: String -> ReadM ReviewerType
readReviewerType_ "initial" = pure RevInitial
readReviewerType_ "final" = pure RevInitial
readReviewerType_ "solix" = pure RevInitial
readReviewerType_ "usac" = pure RevInitial
readReviewerType_ "HSInit" = pure RevInitial
readReviewerType_ "HSFinal" = pure RevInitial
readReviewerType_ _ = fail $ "Unrecognized reviewer type! Valid ones are: " <> "initial, final, solix, usac, HSInit, and HSFinal"
-- | Assemble a review configuration from @--review-type@,
-- @--reviewer-type@ and @--fy@.
reviewBaseConfParser :: Parser ReviewBaseConf
reviewBaseConfParser = ReviewBaseConf
  <$> reviewTypeParser
  <*> reviewerTypeParser
  <*> fyParser
-- rampupParser :: Parser RampupTime
-- rampupParser = mkRampup
-- <$> option auto
-- ( long "rampup"
-- <> help "The rampup period (in seconds) for the script"
-- )
-- | Single login from @-u/--username@ and @-p/--password@.
loginParser :: Parser Login
loginParser = Login
  <$> fmap pack (strOption
  ( long "username"
  <> short 'u'
  ))
  <*> fmap pack (strOption
  ( long "password"
  <> short 'p'
  ))
-- | @-m/--maxSize@: QuickCheck generator size limit.
maxSizeParser :: Parser MaxSize
maxSizeParser = MaxSize
  <$> option auto
  ( long "maxSize"
  <> short 'm'
  )
| limaner2002/EPC-tools | USACScripts/src/Scripts/Opts.hs | bsd-3-clause | 22,963 | 0 | 25 | 4,546 | 5,130 | 2,624 | 2,506 | -1 | -1 |
module Buffer (
Arena
, prepareArena
, prepareDummyArena
, getBuffer
, borrowBuffer
, returnBuffer
) where
import Control.Applicative ((<$>))
import Control.Monad (replicateM)
import Data.IORef (IORef, newIORef, atomicModifyIORef)
import Foreign.ForeignPtr (mallocForeignPtrBytes)
import Types
-- | A pool of reusable receive buffers guarded by an 'IORef'.
newtype Arena = Arena (IORef [Buffer])
-- | Allocate a fresh buffer of 'recvBufferSize' bytes.
getBuffer :: IO Buffer
getBuffer = mallocForeignPtrBytes recvBufferSize
-- | Pre-allocate 'arenaSize' buffers and wrap them in a new 'Arena'.
prepareArena :: IO Arena
prepareArena = replicateM arenaSize getBuffer >>= fmap Arena . newIORef
-- | An arena with no pre-allocated buffers; every borrow allocates.
prepareDummyArena :: IO Arena
prepareDummyArena = Arena <$> newIORef []
-- | Take a pooled buffer from the arena if one is available; otherwise
-- allocate a fresh one.  The pop is done atomically so concurrent
-- borrowers never receive the same buffer.
borrowBuffer :: Arena -> IO Buffer
borrowBuffer (Arena ref) =
    atomicModifyIORef ref takeOne >>= maybe getBuffer return
  where
    takeOne [] = ([], Nothing)
    takeOne (b:bs) = (bs, Just b)
-- | Give a buffer back to the arena for reuse.
-- NOTE(review): 'atomicModifyIORef' is lazy, so repeated returns can
-- build a chain of cons thunks inside the ref; consider
-- 'atomicModifyIORef'' — confirm against usage patterns first.
returnBuffer :: Arena -> Buffer -> IO ()
returnBuffer (Arena ref) buf = atomicModifyIORef ref $ \bs -> (buf:bs, ())
| kazu-yamamoto/witty | src/Buffer.hs | bsd-3-clause | 990 | 0 | 10 | 202 | 330 | 176 | 154 | 31 | 3 |
{-# LANGUAGE OverlappingInstances #-}
{-# LANGUAGE UndecidableInstances #-}
-- | Generic representation of EDSL syntax. The types 'Term' and 'TERM' represent abstract syntax.
-- The functions 'smartConstr' and 'smartSugar' are used to define smart constructors, which can be
-- thought of as the concrete EDSL syntax.
--
-- This module reexports many things from the @compdata@ package.
--
-- Example use:
--
-- > {-# LANGUAGE DeriveFoldable, DeriveFunctor, TemplateHaskell #-}
-- >
-- > import Data.Foldable
-- > import Language.Embedded.Syntax
-- >
-- > data Arith a = Int Int | Add a a
-- > deriving (Functor, Foldable)
-- >
-- > -- Enable rendering
-- > derive [makeShowF, makeShowConstr] [''Arith]
-- > instance Render Arith
-- >
-- > type Exp a = TERM Arith a
-- >
-- > -- Smart constructor for integer literals
-- > int :: Int -> Exp Int
-- > int = smartConstr . Int
-- >
-- > -- Smart constructor for addition
-- > (<+>) :: Exp Int -> Exp Int -> Exp Int
-- > (<+>) = smartConstr Add
--
-- Testing in GHCi:
--
-- > *Main> drawAST (int 1 <+> int 2 :: Exp Int)
-- > Add
-- > ├╴Int 1
-- > └╴Int 2
module Language.Embedded.Syntax
( -- * Term representation
Cxt (..)
, Context
, Term
, unTerm
, simpCxt
, (:+:) (..)
, (:<:)
, inj
, proj
, inject
, project
, Alg
, AlgM
, cata
, cataM
, module Data.Comp.Render
, module Data.Comp.Derive
-- Exports Foldable and Traversable
, module Data.Comp.Generic
, TERM (..)
, ConstrType
, SmartConstr (..)
-- * Syntactic sugar
, Syntactic (..)
, resugar
, desugar'
, sugar'
, SyntacticN (..)
, smartSugar
-- * Rendering
, showAST
, drawAST
, writeHtmlAST
) where
import Data.Comp
import Data.Comp.Ops -- For the constructors of (:+:)
import Data.Comp.Show () -- For instances
import Data.Comp.Render
import Data.Comp.Derive
import Data.Comp.Generic
----------------------------------------------------------------------------------------------------
-- * Term representation
----------------------------------------------------------------------------------------------------
-- | 'Term' with a phantom type parameter
-- The phantom @a@ carries the object-language type of the expression;
-- 'unTERM' discards it to expose the untyped 'Term'.
newtype TERM f a = TERM {unTERM :: Term f}
  deriving (Eq, Ord)
instance Show (Term f) => Show (TERM f a)
  where
    show = show . unTERM
-- | The type of a constructor corresponding to a smart constructor
type family ConstrType a (sup :: * -> *) (sub :: * -> *)
-- | Smart constructors
--
-- Instances are defined inductively over the shape of the smart
-- constructor's type: the base cases wrap a fully-applied constructor
-- ('Term'/'TERM' results), and the arrow cases peel off one argument
-- at a time, converting 'TERM' arguments to 'Term' via 'unTERM'.
class (ConstrType smart (SmartSup smart) (SmartSub smart con) ~ con) => SmartConstr smart con
  where
    type SmartSup smart :: * -> *
    type SmartSub smart con :: * -> *
    -- | Make a smart constructor
    smartConstr :: con -> smart
-- Base case: result is an untyped 'Term'.
type instance ConstrType (Term sup) sup sub = sub (Term sup)
instance (sub :<: sup) => SmartConstr (Term sup) (sub (Term sup))
  where
    type SmartSup (Term sup) = sup
    type SmartSub (Term sup) (sub (Term sup)) = sub
    smartConstr = Term . inj
-- Step case: one more 'Term' argument.
type instance ConstrType (Term sup -> a) sup sub = Term sup -> ConstrType a sup sub
instance
    ( SmartConstr smart con
    , (ConstrType (Term sup -> smart) (SmartSup smart) (SmartSub smart con)) ~ (Term sup -> con)
    ) =>
    SmartConstr (Term sup -> smart) (Term sup -> con)
  where
    type SmartSup (Term sup -> smart) = SmartSup smart
    type SmartSub (Term sup -> smart) (Term sup -> con) = SmartSub smart con
    smartConstr f = smartConstr . f
-- Base case: result is a phantom-typed 'TERM'.
type instance ConstrType (TERM sup a) sup sub = sub (Term sup)
instance (sub :<: sup) => SmartConstr (TERM sup a) (sub (Term sup))
  where
    type SmartSup (TERM sup a) = sup
    type SmartSub (TERM sup a) (sub (Term sup)) = sub
    smartConstr = TERM . Term . inj
-- Step case: one more phantom-typed 'TERM' argument.
type instance ConstrType (TERM sup a -> smart) sup sub = Term sup -> ConstrType smart sup sub
instance
    ( SmartConstr smart con
    , (ConstrType (TERM sup a -> smart) (SmartSup smart) (SmartSub smart con)) ~ (Term sup -> con)
    ) =>
    SmartConstr (TERM sup a -> smart) (Term sup -> con)
  where
    type SmartSup (TERM sup a -> smart) = SmartSup smart
    type SmartSub (TERM sup a -> smart) (Term sup -> con) = SmartSub smart con
    smartConstr f = smartConstr . f . unTERM
----------------------------------------------------------------------------------------------------
-- * Syntactic sugar
----------------------------------------------------------------------------------------------------
-- | \"Syntactic sugar\" -- types that can be converted to and from 'TERM'
--
-- For details, see "Combining Deep and Shallow Embedding for EDSL"
-- (TFP 2013, <http://www.cse.chalmers.se/~emax/documents/svenningsson2013combining.pdf>).
--
-- It is usually assumed that @(`desugar` (`sugar` a))@ has the same meaning as @a@.
class Syntactic a
  where
    type PF a :: * -> *
    type Internal a
    desugar :: a -> TERM (PF a) (Internal a)
    sugar :: TERM (PF a) (Internal a) -> a
-- | Sugar-based type casting
resugar :: (Syntactic a, Syntactic b, Internal a ~ Internal b, PF a ~ PF b) => a -> b
resugar = sugar . desugar
-- | 'desugar' all the way down to the untyped 'Term'.
desugar' :: Syntactic a => a -> Term (PF a)
desugar' = unTERM . desugar
-- | 'sugar' starting from an untyped 'Term'.
sugar' :: Syntactic a => Term (PF a) -> a
sugar' = sugar . TERM
-- 'TERM' itself is trivially syntactic: both conversions are 'id'.
instance Syntactic (TERM f a)
  where
    type PF (TERM f a) = f
    type Internal (TERM f a) = a
    desugar = id
    sugar = id
-- | N-ary syntactic functions
--
-- Instances recurse over the function arrow: the base case converts a
-- 'Syntactic' result, the step case converts one argument at a time.
class SyntacticN f internal | f -> internal
  where
    -- | Informally:
    --
    -- > desugarN f a b ... k = desugar $ f (sugar a) (sugar b) ... (sugar k)
    desugarN :: f -> internal
    -- | Informally:
    --
    -- > sugarN f a b ... k = sugar $ f (desugar a) (desugar b) ... (desugar k)
    sugarN :: internal -> f
instance (Syntactic f, fi ~ TERM (PF f) (Internal f)) => SyntacticN f fi
  where
    desugarN = desugar
    sugarN = sugar
instance
    ( Syntactic a
    , ia ~ Internal a
    , pf ~ PF a
    , SyntacticN f fi
    ) =>
    SyntacticN (a -> f) (TERM pf ia -> fi)
  where
    desugarN f = desugarN . f . sugar
    sugarN f = sugarN . f . desugar
-- | Make a \"sugared\" smart constructor
--
-- Informally:
--
-- > smartSugar f a b ... k = sugar $ TERM $ Term $ inj $ f (desugar' a) (desugar' b) ... (desugar' k)
-- NOTE(review): see the TODO below — type inference for this
-- combinator is incomplete in some use sites.
smartSugar :: (SyntacticN sugar smart, SmartConstr smart con) => con -> sugar
smartSugar = sugarN . smartConstr
-- TODO
-- The following doesn't work:
--
-- data A a = A a a
--
-- aaa :: (Syntactic a, PF a ~ A) => a -> a -> a
-- aaa = smartSugar A
--
-- It seems that the `smart` and `con` type variables (in the type of `smartSugar`) are not
-- completely resolved. A fix is to do:
--
-- aaa = sugarN (smartConstr A :: TERM A a -> TERM A a -> TERM A a)
--
-- or
--
-- aaa a b = sugar $ smartConstr A (desugar a) (desugar b)
--
-- It should be possible to fix this by putting stronger constraints on `SmartConstr` and `SugarN`.
-- But this will probably be easier to do with closed type families.
----------------------------------------------------------------------------------------------------
-- * Rendering
----------------------------------------------------------------------------------------------------
-- | Show the syntax tree using unicode art
showAST :: (Syntactic a, Render (PF a)) => a -> String
showAST = showTerm . desugar'
-- | Draw the syntax tree on the terminal using unicode art
drawAST :: (Syntactic a, Render (PF a)) => a -> IO ()
drawAST = drawTerm . desugar'
-- | Write the syntax tree to an HTML file with foldable nodes
writeHtmlAST :: (Syntactic a, Render (PF a)) => FilePath -> a -> IO ()
writeHtmlAST file = writeHtmlTerm file . desugar'
| emilaxelsson/compass | src/Language/Embedded/Syntax.hs | bsd-3-clause | 7,721 | 0 | 12 | 1,820 | 1,743 | 981 | 762 | -1 | -1 |
{-# LANGUAGE ForeignFunctionInterface #-}
-------------------------------------------------------------------------------
-- |
-- Copyright : (c) 2015 Michael Carpenter
-- License : BSD3
-- Maintainer : Michael Carpenter <[email protected]>
-- Stability : experimental
-- Portability : portable
--
-------------------------------------------------------------------------------
module Sound.Csound.ScoreHandling (
--csoundReadScore,
csoundGetScoreTime,
csoundIsScorePending,
csoundSetScorePending,
--csoundGetScoreOffsetSeconds,
--csoundSetScoreOffsetSeconds,
csoundRewindScore
--csoundSetCscoreCallback,
--csoundScoreSort,
--csoundScoreExtract
) where
import Control.Monad.IO.Class
import Foreign.C.Types
import Foreign.Ptr
--foreign import ccall "csound.h csoundReadScore" csoundReadScore'
-- Raw FFI bindings.  The @Ptr ()@ is the opaque CSOUND instance handle
-- passed by the wrappers below (primed names are the raw C calls).
foreign import ccall "csound.h csoundGetScoreTime" csoundGetScoreTime' :: Ptr () -> IO CDouble
foreign import ccall "csound.h csoundIsScorePending" csoundIsScorePending' :: Ptr () -> IO CInt
foreign import ccall "csound.h csoundSetScorePending" csoundSetScorePending' :: Ptr () -> CInt -> IO ()
--foreign import ccall "csound.h csoundGetScoreOffsetSeconds" csoundGetScoreOffsetSeconds'
--foreign import ccall "csound.h csoundSetScoreOffsetSeconds" csoundSetScoreOffsetSeconds'
foreign import ccall "csound.h csoundRewindScore" csoundRewindScore' :: Ptr () -> IO ()
--foreign import ccall "csound.h csoundSetCscoreCallback" csoundSetCscoreCallback'
--foreign import ccall "csound.h csoundScoreSort" csoundScoreSort'
--foreign import ccall "csound.h csoundScoreExtract" csoundScoreExtract'
--csoundReadScore
--csoundReadScore
-- | Lift of the raw @csoundGetScoreTime@ call into any 'MonadIO'.
csoundGetScoreTime :: MonadIO m => Ptr () -> m CDouble
csoundGetScoreTime = liftIO . csoundGetScoreTime'

-- | Lift of the raw @csoundIsScorePending@ call into any 'MonadIO'.
csoundIsScorePending :: MonadIO m => Ptr () -> m CInt
csoundIsScorePending = liftIO . csoundIsScorePending'

-- | Lift of the raw @csoundSetScorePending@ call into any 'MonadIO'.
csoundSetScorePending :: MonadIO m => Ptr () -> CInt -> m ()
csoundSetScorePending csnd = liftIO . csoundSetScorePending' csnd
--csoundGetScoreOffsetSeconds
--csoundGetScoreOffsetSeconds
--csoundSetScoreOffsetSeconds
--csoundSetScoreOffsetSeconds
-- | Lift of the raw @csoundRewindScore@ call into any 'MonadIO'.
csoundRewindScore :: MonadIO m => Ptr () -> m ()
csoundRewindScore = liftIO . csoundRewindScore'
--csoundSetCscoreCallback
--csoundSetCscoreCallback
--csoundScoreSort
--csoundScoreSort
--csoundScoreExtract
--csoundScoreExtract
| oldmanmike/CsoundRaw | src/Sound/Csound/ScoreHandling.hs | bsd-3-clause | 2,428 | 0 | 9 | 293 | 353 | 194 | 159 | 21 | 1 |
{-# OPTIONS_GHC -Wall #-}
{-# LANGUAGE OverloadedStrings #-}
module Reporting.Error.Publish
( Error(..)
, toReport
)
where
import qualified Text.PrettyPrint.ANSI.Leijen as P
import Text.PrettyPrint.ANSI.Leijen ((<>))
import Deps.Diff (Magnitude, magnitudeToString)
import qualified Elm.Package as Pkg
import qualified Reporting.Error.Help as Help
-- ERRORS
-- | Everything that can go wrong when publishing a package.
-- Each constructor carries just enough context for 'toReport' to render
-- a human-readable explanation.
data Error
  = Application                 -- ^ tried to publish an application, not a package
  | NotInitialVersion Pkg.Version
  | AlreadyPublished Pkg.Version
  | InvalidBump Pkg.Version Pkg.Version  -- ^ stated version, latest published version
  | BadBump Pkg.Version Pkg.Version Magnitude Pkg.Version Magnitude
    -- ^ old, stated new, stated magnitude, expected new, actual magnitude
  | NoSummary
  | NoExposed
  | NoReadme
  | ShortReadme
  | NoLicense
  | MissingTag Pkg.Version
  | NoGit
  | LocalChanges Pkg.Version
-- TO REPORT
-- | Render a publishing 'Error' as a terminal report with actionable advice.
-- Only text content is produced here; coloring comes from the ANSI pretty
-- printer.  (Fixes the user-facing typo "consice" -> "concise" in the
-- NoSummary report; all other wording is unchanged.)
toReport :: Error -> Help.Report
toReport err =
  case err of
    Application ->
      Help.report "UNPUBLISHABLE" Nothing "I cannot publish applications, only packages!" []

    NotInitialVersion vsn ->
      Help.docReport "INVALID VERSION" Nothing
        ( P.fillSep
            ["I","cannot","publish"
            ,P.red (P.text (Pkg.versionToString vsn))
            ,"as","the","initial","version."
            ]
        )
        [ P.fillSep
            ["Change","it","to",P.green "1.0.0","which","is"
            ,"the","initial","version","for","all","Elm","packages."
            ]
        ]

    AlreadyPublished vsn ->
      Help.docReport "ALREADY PUBLISHED" Nothing
        ( P.vcat
            [ P.fillSep
                [ "Version", P.green (P.text (Pkg.versionToString vsn))
                , "has", "already", "been", "published.", "You", "cannot"
                , "publish", "it", "again!"
                ]
            , "Try using the `bump` command:"
            ]
        )
        [ P.dullyellow $ P.indent 4 $ P.text "elm bump"
        , Help.reflow $
            "It computes the version number based on API changes, ensuring\
            \ that no breaking changes end up in PATCH releases!"
        ]

    InvalidBump statedVersion latestVersion ->
      Help.docReport "INVALID VERSION" (Just "elm.json")
        ( P.fillSep $
            ["Your","elm.json","says","the","next","version","should","be"
            ,P.red (P.text (Pkg.versionToString statedVersion)) <> ","
            ,"but","that","is","not","valid","based","on","the","previously"
            ,"published","versions."
            ]
        )
        [ P.fillSep $
            ["Change","the","version","back","to"
            ,P.green (P.text (Pkg.versionToString latestVersion))
            ,"which","is","the","most","recently","published","version."
            ,"From","there,","have","Elm","bump","the","version","by","running:"
            ]
        , P.indent 4 $ P.green "elm bump"
        , Help.reflow $
            "If you want more insight on the API changes Elm detects, you\
            \ can run `elm diff` at this point as well."
        ]

    BadBump old new magnitude realNew realMagnitude ->
      Help.docReport "INVALID VERSION" (Just "elm.json")
        (
          P.fillSep $
            ["Your","elm.json","says","the","next","version","should","be"
            ,P.red (P.text (Pkg.versionToString new)) <> ","
            ,"indicating","a",P.text (magnitudeToString magnitude)
            ,"change","to","the","public","API."
            ,"This","does","not","match","the","API","diff","given","by:"
            ]
        )
        [ P.indent 4 $ P.text $
            "elm diff " ++ Pkg.versionToString old
        , P.fillSep $
            ["This","command","says","this","is","a"
            ,P.text (magnitudeToString realMagnitude)
            ,"change,","so","the","next","version","should","be"
            ,P.green (P.text (Pkg.versionToString realNew)) <> "."
            ,"Double","check","everything","to","make","sure","you"
            ,"are","publishing","what","you","want!"
            ]
        , Help.reflow $
            "Also, next time use `elm bump` and I'll figure all this out for you!"
        ]

    NoSummary ->
      Help.docReport "NO SUMMARY" (Just "elm.json")
        ( P.fillSep $
            [ "To", "publish", "a", "package,", "your", "elm.json", "must"
            , "have", "a", P.dullyellow "\"summary\"", "field", "that", "gives"
            -- "consice" was a typo in the published error text
            , "a", "concise", "overview", "of", "your", "project."
            ]
        )
        [ Help.reflow $
            "The summary must be less than 80 characters. It should describe\
            \ the concrete use of your package as clearly and as plainly as possible."
        ]

    NoExposed ->
      Help.docReport "NO EXPOSED MODULES" (Just "elm.json")
        ( P.fillSep $
            [ "To", "publish", "a", "package,", "the"
            , P.dullyellow "\"exposed-modules\"", "field", "of", "your"
            , "elm.json", "must", "list", "at", "least", "one", "module."
            ]
        )
        [ Help.reflow $
            "What is the point of a package that has no modules?!"
        ]

    NoReadme ->
      badReadmeReport "NO README" $
        "Every published package must have a helpful README.md\
        \ file, but I do not see one in your project."

    ShortReadme ->
      badReadmeReport "SHORT README" $
        "This README.md is too short. Having more details will help\
        \ people assess your package quickly and fairly."

    NoLicense ->
      Help.report "NO LICENSE FILE" (Just "LICENSE")
        "By publishing a package you are inviting the Elm community to build\
        \ upon your work. But without knowing your license, we have no idea if\
        \ that is legal!"
        [ Help.reflow $
            "Once you pick an OSI approved license from <https://spdx.org/licenses/>,\
            \ you must share that choice in two places. First, the license\
            \ identifier must appear in your elm.json file. Second, the full\
            \ license text must appear in the root of your project in a file\
            \ named LICENSE. Add that file and you will be all set!"
        ]

    MissingTag version ->
      let vsn = Pkg.versionToString version in
      Help.docReport "NO TAG" Nothing
        ( P.fillSep $
            [ "Packages", "must", "be", "tagged", "in", "git,", "but", "I"
            , "cannot", "find", "a", P.green (P.text vsn), "tag."
            ]
        )
        [ P.vcat
            [ "These tags make it possible to find this specific version on GitHub."
            , "To tag the most recent commit and push it to GitHub, run this:"
            ]
        , P.indent 4 $ P.dullyellow $ P.vcat $ map P.text $
            [ "git tag -a " ++ vsn ++ " -m \"release version " ++ vsn ++ "\""
            , "git push origin " ++ vsn
            ]
        , "The -m flag is for a helpful message. Try to make it more informative!"
        ]

    NoGit ->
      Help.report "NO GIT" Nothing
        "I searched your PATH environment variable for `git` and could not\
        \ find it. Is it available through your PATH?"
        [ Help.reflow $
            "Who cares about this? Well, I currently use `git` to check if there\
            \ are any local changes in your code. Local changes are a good sign\
            \ that some important improvements have gotten mistagged, so this\
            \ check can be extremely helpful for package authors!"
        , Help.note $
            "We plan to do this without the `git` binary in a future release."
        ]

    LocalChanges version ->
      let vsn = Pkg.versionToString version in
      Help.docReport "LOCAL CHANGES" Nothing
        ( P.fillSep $
            [ "The", "code", "tagged", "as", P.green (P.text vsn), "in"
            , "git", "does", "not", "match", "the", "code", "in", "your"
            , "working", "directory.", "This", "means", "you", "have"
            , "commits", "or", "local", "changes", "that", "are", "not"
            , "going", "to", "be", "published!"
            ]
        )
        [ Help.note $
            "If you are sure everything is in order, you can run `git checkout "
            ++ vsn ++ "` and publish your code from there."
        ]
-- | Shared report body for README problems ('NoReadme' and 'ShortReadme'):
-- the caller supplies the title and a one-paragraph summary, the advice
-- section is identical for both cases.
badReadmeReport :: String -> String -> Help.Report
badReadmeReport title summary =
  Help.report title (Just "README.md") summary
    [ Help.reflow $
        "When people look at your README, they are wondering:"
    , P.vcat
        [ "  - What does this package even do?"
        , "  - Will it help me solve MY problems?"
        ]
    , Help.reflow $
        "So I recommend starting your README with a small example of the\
        \ most common usage scenario. Show people what they can expect if\
        \ they learn more!"
    , Help.note $
        "By publishing your package, you are inviting people to invest time\
        \ in understanding your work. Spending an hour to communicate your\
        \ knowledge more clearly can save the community days or weeks of time\
        \ in aggregate, and saving time in aggregate is the whole point of\
        \ publishing packages! People really appreciate it, and it makes the\
        \ whole ecosystem feel nicer!"
    ]
| evancz/builder | src/Reporting/Error/Publish.hs | bsd-3-clause | 9,038 | 0 | 19 | 2,801 | 1,729 | 986 | 743 | 155 | 13 |
{-# LANGUAGE RecordWildCards #-}
module Day21 where
{- Day 21: RPG Simulator 20XX -}
-- | Combatant state: hit points, attack damage, armor, and the gold spent
-- on the equipment that produced those stats.
data Player = Player { hp :: Int, dmg :: Int, armor :: Int, price :: Int} deriving (Eq)
-- | Shop items; each records its gold cost plus the stat it grants.
data Weapon = Weapon { costw :: Int, dmgw :: Int } deriving (Eq)
data Armor = Armor { costa :: Int, armora :: Int } deriving (Eq)
data Ring = Ring { costr :: Int, dmgr :: Int, armorr :: Int } deriving (Eq)
-- | The full inventory the player can buy from.
data Shop = Shop { weapons :: [Weapon], armors :: [Armor], rings :: [Ring] } deriving (Eq)
-- | Apply one attack to a player: hit points drop by the attack value minus
-- the defender's armor, but always by at least 1.  All other stats are kept.
damage :: Player -> Int -> Player
damage defender attack = defender { hp = hp defender - max 1 (attack - armor defender) }
-- | The fixed item shop from the puzzle statement.  The two @Ring 0 0 0@
-- entries model the two ring slots being allowed to stay empty.
shop :: Shop
shop = Shop weapons armors rings
  where weapons = [ Weapon 8 4
                  , Weapon 10 5
                  , Weapon 25 6
                  , Weapon 40 7
                  , Weapon 74 8
                  ]
        armors = [ Armor 0 0
                 , Armor 13 1
                 , Armor 31 2
                 , Armor 53 3
                 , Armor 75 4
                 , Armor 102 5
                 ]
        rings = [ Ring 0 0 0
                , Ring 0 0 0
                , Ring 25 1 0
                , Ring 50 2 0
                , Ring 100 3 0
                , Ring 20 0 1
                , Ring 40 0 2
                , Ring 80 0 3
                ]
-- | The opponent's puzzle-input stats; price 0 since the boss buys nothing.
boss :: Player
boss = Player 100 8 2 0
-- | Every legal loadout: exactly one weapon, one armor (possibly the free
-- "no armor" entry), and two ring picks.  The guard admits a pair of rings
-- when both slots are empty (the two identical free rings compare equal)
-- or when the two rings are distinct.
--
-- The original guard read @costr r1 == 0 && costr r1 == 0@ — a duplicated
-- @r1@ that was clearly meant to be @r2@.  (Behavior is unaffected: if the
-- two picks are equal and the first is free, the second is the same free
-- ring, but the corrected form states the intent.)
combinations :: [Player]
combinations = [ Player { hp = 100
                        , dmg = dmgw w + dmgr r1 + dmgr r2
                        , armor = armora a + armorr r1 + armorr r2
                        , price = costw w + costa a + costr r1 + costr r2
                        }
               | w <- weapons shop
               , a <- armors shop
               , r1 <- rings shop
               , r2 <- rings shop
               , costr r1 == 0 && costr r2 == 0 || r1 /= r2
               ]
-- | Fight to the death; returns True iff the player (first argument) wins.
-- The Bool tracks whose turn it is: True = player strikes next.  Clause
-- order matters: death checks run before either side attacks.
simulate :: Player -> Player -> Bool -> Bool
simulate p _ _ | hp p <= 0 = False
simulate _ b _ | hp b <= 0 = True
simulate p b True = simulate p (damage b (dmg p)) False
simulate p b False = simulate (damage p (dmg b)) b True
-- | Part One: cheapest loadout that still beats the boss (player goes first).
day21 :: IO ()
day21 = print $ foldl f (maxBound :: Int) combinations
  where f mini c = if price c < mini && simulate c boss True
                   then price c
                   else mini
{- Part Two -}
-- | Part Two: most expensive loadout that still loses to the boss.
day21' :: IO ()
day21' = print $ foldl f (minBound :: Int) combinations
  where f maxi c = if price c > maxi && not (simulate c boss True)
                   then price c
                   else maxi
| Rydgel/advent-of-code | src/Day21.hs | bsd-3-clause | 2,494 | 0 | 12 | 1,100 | 917 | 480 | 437 | 57 | 2 |
{-# LANGUAGE QuasiQuotes #-}
import LiquidHaskell
-- LiquidHaskell test fixture: the [lq| ... |] quasiquotes attach refinement
-- specs to the plain Haskell declarations.  Keep the code and specs in sync.
data Bob = B {foo :: Int}
[lq| data Bob = B {foo :: Int} |]
[lq| foo :: x:Bob -> {v:Int | v = foo x} |]
-- Global invariant: every Bob has foo == 10.
[lq| invariant {v:Bob | foo v == 10} |]
mk :: a -> Bob
mk = undefined
-- propFAIL / propOK probe how the invariant interacts with the spec on mk;
-- the FAIL/OK names mark the expected verifier outcome — do not "fix" them.
[lq| propFAIL :: {v:_ | foo v = 10} |]
propFAIL = mk ()
[lq| propOK :: {v:_ | foo v = 10} |]
propOK = let z = mk () in z
| spinda/liquidhaskell | tests/gsoc15/unknown/pos/invlhs.hs | bsd-3-clause | 358 | 0 | 10 | 97 | 100 | 60 | 40 | 12 | 1 |
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE OverloadedStrings #-}
module Glug.Net (
realTlsGetUrl
) where
import qualified Data.ByteString.Lazy as BSL
import qualified Network.HTTP.Conduit as C
import Control.Concurrent (threadDelay)
import Control.Monad.Except (MonadError, throwError)
import Control.Monad.IO.Class (MonadIO, liftIO)
import Network.HTTP.Types (statusCode)
import Glug.Constants (useragent)
-- | Performs a network request for the resource at the given URL, retrying
-- (recursively, with a short pause) while the server reports rate limiting.
realTlsGetUrl :: (MonadError String m, MonadIO m) =>
                 String
                 -- ^ the URL to request
                 -> m BSL.ByteString
                 -- ^ the response body
realTlsGetUrl url = do
    liftIO . putStrLn $ "Getting URL " ++ url
    req <- parseRequest url
    mgr <- liftIO $ C.newManager C.tlsManagerSettings
    let req' = req { C.requestHeaders = [("User-Agent", useragent)] }
    resp <- C.httpLbs req' mgr
    -- NOTE(review): 409 is "Conflict"; the standard rate-limit status is
    -- 429 — confirm which code this endpoint actually returns.
    if (statusCode . C.responseStatus $ resp) == 409
      -- rate limited, pause and retry
      -- TODO make this a more sane strategy (bounded retries / backoff)
      then do
        liftIO $ putStrLn "RATE LIMITED: delaying and retrying"
        -- threadDelay takes MICROseconds; the previous literal 100 paused
        -- only 100 µs despite the stated intent of 100 ms.
        liftIO $ threadDelay 100000 -- pause 100ms
        realTlsGetUrl url
      -- response available
      else return $ C.responseBody resp
-- | Parse a URL into an http-conduit 'C.Request', reporting a malformed URL
-- through 'MonadError' (using the Maybe instance of 'C.parseRequest').
parseRequest :: (MonadError String m) => String -> m C.Request
parseRequest url =
    maybe (throwError "could not parse url") return (C.parseRequest url)
| robmcl4/Glug | src/Glug/Net.hs | bsd-3-clause | 1,654 | 0 | 13 | 540 | 348 | 188 | 160 | 30 | 2 |
-- Fishtank: 3D OpenGL demo with flocking boids
-- Author: Matthew Danish. License: BSD3 (see LICENSE file)
--
-- Assorted convenience utility functions.
module Util where
import Graphics.Rendering.OpenGL.GL
import Data.List (foldl')
import System.Random
-- | 3D vector of GL doubles; the workhorse type of this module.
type Vector3d = Vector3 GLdouble
-- | Apply a three-argument function to a triple.
uncurry3 f (a, b, c) = f a b c
-- Monomorphic GL wrappers pinned to GLdouble, each paired with a
-- triple-taking convenience variant built via uncurry3.
color3d' = color :: Color3 GLdouble -> IO ()
color3d = color3d' . uncurry3 Color3
scaled' = scale :: GLdouble -> GLdouble -> GLdouble -> IO ()
scaled = uncurry3 scaled'
vertex3d' = vertex :: Vertex3 GLdouble -> IO ()
vertex3d = vertex3d' . uncurry3 Vertex3
normal3d' = normal :: Normal3 GLdouble -> IO ()
normal3d = normal3d' . uncurry3 Normal3
rotated' = rotate :: GLdouble -> Vector3d -> IO ()
rotated a = rotated' a . uncurry3 Vector3
translated' = translate :: Vector3d -> IO ()
translated = translated' . uncurry3 Vector3
texCoord2d' = texCoord :: TexCoord2 GLdouble -> IO ()
texCoord2d = texCoord2d' . uncurry TexCoord2
r2d a = a * 180 / pi -- radian to degree
-- some vector ops
-- | Euclidean length.
magnitude (Vector3 x y z) = sqrt (x*x + y*y + z*z)
-- | Scalar multiply / componentwise add, subtract, reciprocal.
s `vecScale` Vector3 x y z = Vector3 (s*x) (s*y) (s*z)
Vector3 x1 y1 z1 `vecAdd` Vector3 x2 y2 z2 = Vector3 (x1+x2) (y1+y2) (z1+z2)
Vector3 x1 y1 z1 `vecSub` Vector3 x2 y2 z2 = Vector3 (x1-x2) (y1-y2) (z1-z2)
vecReciprocal (Vector3 x y z) = Vector3 (1/x) (1/y) (1/z)
-- | Sum of a list of vectors (strict left fold from zero).
vecSum l = foldl' vecAdd (Vector3 0 0 0) l
-- | Dot and cross products.
(Vector3 x1 y1 z1) `dotP` (Vector3 x2 y2 z2) = x1*x2 + y1*y2 + z1*z2
(Vector3 x1 y1 z1) `crossP` (Vector3 x2 y2 z2) =
    Vector3 (y1*z2 - z1*y2) (z1*x2 - z2*x1) (x1*y2 - x2*y1)
-- | Project v onto the plane orthogonal to n: normalize n, then subtract
-- v's component along n.  Degenerate if n is the zero vector (nmag = 0).
projectV v n = Vector3 ux uy uz
  where
    Vector3 vx vy vz = v
    Vector3 nx ny nz = n
    nmag = sqrt (nx*nx + ny*ny + nz*nz)
    (nx',ny',nz') = (nx / nmag, ny / nmag, nz / nmag)
    dp = vx * nx' + vy * ny' + vz * nz'
    ux = vx - nx' * dp
    uy = vy - ny' * dp
    uz = vz - nz' * dp
-- | Angle between two vectors, in degrees.
angleBetween u v = r2d (acos ((u `dotP` v) / (magnitude u * magnitude v)))
-- | A vector with each component uniform in [-1, 1].  Note: not uniform on
-- the unit sphere, and not normalized.
randomVector3d :: IO Vector3d
randomVector3d = do
  x <- randomRIO (-1, 1)
  y <- randomRIO (-1, 1)
  z <- randomRIO (-1, 1)
  return $ Vector3 x y z
-- | Pick a uniformly random element of a list.
-- Calling this with an empty list is a programming error.
randomChoice :: [a] -> IO a
randomChoice [] = error "randomChoice: empty list"
randomChoice xs = do
  idx <- randomRIO (0, length xs - 1)
  return (xs !! idx)
| mrd/fishtank | Util.hs | bsd-3-clause | 2,327 | 0 | 13 | 593 | 1,090 | 565 | 525 | 49 | 1 |
module Data.Profiles where
import Model
-- | Sample appliance profile: three named behaviors ("On" cycles a square
-- wave, "Standby" and "Off" hold constants) plus the allowed state
-- transitions, each as (from, to, and two numeric parameters).
fridge :: MachineDescription
fridge = MachineDescription
    { name = "Philipp G. Fridge"
    , behavior = [ ("On", Repeat (square 0 2 100 2))
                 , ("Standby", Once $ constant 15)
                 , ("Off", Once $ constant 0) ]
    , transitions = [ ("On", "Off", 1, 0)
                    , ("Off", "On", 0, 10)
                    , ("On", "Standby", 5, 10)
                    , ("Standby", "On", 5, 10)
                    , ("Standby", "Off", 1, 0)
                    , ("Off", "Standby", 2, 10) ] }
| redelmann/e-zimod-server | Data/Profiles.hs | bsd-3-clause | 563 | 0 | 11 | 225 | 185 | 116 | 69 | 14 | 1 |
{-# LANGUAGE FlexibleContexts #-}
module Cloud.AWS.EC2.Region
( describeRegions
) where
import Data.Text (Text)
import Data.Conduit
import Control.Applicative
import Control.Monad.Trans.Resource (MonadResource, MonadBaseControl)
import Cloud.AWS.Lib.Parser.Unordered ((.<))
import Cloud.AWS.EC2.Internal
import Cloud.AWS.EC2.Types
import Cloud.AWS.EC2.Query
-- | Invoke EC2 DescribeRegions, streaming 'Region' values parsed from the
-- "regionInfo" item list of the response.
describeRegions
    :: (MonadResource m, MonadBaseControl IO m)
    => [Text] -- ^ RegionNames
    -> [Filter] -- ^ Filters
    -> EC2 m (ResumableSource m Region)
describeRegions regions filters =
    ec2QuerySource "DescribeRegions" params path regionInfoConduit
  where
    path = itemsPath "regionInfo"
    params =
        [ "RegionName" |.#= regions
        , filtersParam filters
        ]
    -- Each <item> carries a name and an endpoint.
    regionInfoConduit = itemConduit $ \e -> Region
        <$> e .< "regionName"
        <*> e .< "regionEndpoint"
| worksap-ate/aws-sdk | Cloud/AWS/EC2/Region.hs | bsd-3-clause | 884 | 0 | 12 | 180 | 213 | 126 | 87 | 25 | 1 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
module API where
import Control.Monad (unless)
import Control.Monad.Trans (liftIO)
import Control.Monad.Trans.Either (EitherT, left)
import Crypto.Hash.SHA256 (hash)
import Data.Functor (($>))
import Data.Maybe (isJust)
import Data.Text (Text, pack, unpack)
import Data.Text.Encoding (decodeLatin1, encodeUtf8)
import Data.Time.Clock.POSIX (getPOSIXTime)
import Servant hiding (JSON)
import Web.JWT (Algorithm (HS256), JWT,
JWTClaimsSet, Secret, VerifiedJWT,
claims, decodeAndVerifySignature,
def, encodeSigned, iat, intDate,
iss, secret, stringOrURI,
stringOrURIToText, sub)
import DB
import Types
-- | SHA-256 digest of the UTF-8 bytes, re-decoded as Latin-1 into Text
-- (so the result holds raw digest bytes, not a hex string).
-- NOTE(review): this is an unsalted, fast hash used for passwords below —
-- consider a dedicated password KDF (bcrypt/scrypt/argon2).
encrypt :: Text -> Text
encrypt = decodeLatin1 . hash . encodeUtf8
-- | Fetch all blog posts from the database.
getBlogPosts :: EitherT ServantErr IO [BlogPost]
getBlogPosts = liftIO $ queryBlogPosts
-- | Create a blog post on behalf of the authenticated user.  The user id
-- comes from the JWT "sub" claim.  A token with a missing or non-numeric
-- subject now yields a 401 instead of crashing: the previous irrefutable
-- @let Just userId = ...@ plus partial 'read' errored at runtime.
createBlogPost :: Maybe Text -> NewBlogPost -> EitherT ServantErr IO BlogPost
createBlogPost mjwt blogPost = do
  jwt <- checkAuth mjwt
  case parseUserId =<< sub (claims jwt) of
    Just userId -> liftIO $ insertBlogPost userId blogPost
    Nothing -> left $ err401 { errBody = "Token subject claim is missing or invalid." }
  where
    -- Total replacement for 'read': succeeds only on a fully consumed number.
    parseUserId s = case reads (unpack (stringOrURIToText s)) of
      [(uid, "")] -> Just uid
      _ -> Nothing
-- | Look up a single blog post by its slug.  Responds 404 when nothing
-- matches — the previous err503 (service unavailable) misreported a plain
-- "resource not found" as a server fault.
getBlogPost :: Slug -> EitherT ServantErr IO BlogPost
getBlogPost slug = do
  blogPost <- liftIO $ queryBlogPost slug
  case blogPost of
    Just blogPost' -> return blogPost'
    Nothing -> left $ err404 { errBody = "No blog posts found with the provided slug." }
-- | Create a user with a hashed password.  The caller must be authenticated;
-- checkAuth's result is discarded — only its failure short-circuits.
createUser :: Maybe Text -> NewUser -> EitherT ServantErr IO User
createUser jwt NewUser {..} = do
  checkAuth jwt
  liftIO $ insertUser $ NewUser nuUsername (encrypt nuPassword) nuEmail
-- | Signing key for issued JWTs.
-- NOTE(review): hard-coded secret in source — move to configuration.
jwtSecret :: Secret
jwtSecret = secret "Fabianz secret"
-- | Build the JWT claims for a user: fixed issuer, the user id as the
-- subject, and the current time as issued-at.
makeClaimsSet :: Int -> IO JWTClaimsSet
makeClaimsSet uid = fmap claimsAt getPOSIXTime
  where
    claimsAt now = def
      { iss = stringOrURI "Fabe"
      , sub = stringOrURI (pack (show uid))
      , iat = intDate now
      }
-- | Assemble the login response: a signed HS256 token plus the public
-- (password-free) projection of the user record.
makeLoginRes :: User -> IO LoginRes
makeLoginRes User {..} = do
  claimsSet <- makeClaimsSet uId
  return $ LoginRes
    { jwt = encodeSigned HS256 jwtSecret claimsSet
    , user = PublicUser uId uUsername uEmail
    }
-- | Authenticate by username + password hash comparison.
-- NOTE(review): (==) on Text is not constant-time, and the distinct
-- "User not found" message reveals which usernames exist — consider a
-- single generic failure message.
login :: LoginReq -> EitherT ServantErr IO LoginRes
login LoginReq {..} = do
  user <- liftIO $ queryUserByUsername username
  case user of
    Just user' -> do
      if encrypt password == uPassword user'
        then liftIO $ makeLoginRes user'
        else left $ ServantErr 400 "Username/password pair did not match" "" []
    Nothing -> left $ ServantErr 400 "User not found" "" []
-- | Verify the bearer token's signature against 'jwtSecret'.
-- Fails with 401 when the token is absent or the signature does not check.
checkAuth :: Maybe Text -> EitherT ServantErr IO (JWT VerifiedJWT)
checkAuth = maybe unauthorized (runCheck . decodeAndVerifySignature jwtSecret)
  where
    runCheck Nothing = unauthorized
    runCheck (Just verifiedJWT) = return verifiedJWT
    unauthorized =
      left $ ServantErr 401 "You are not authenticated. Please sign-in" "" []
-- | Type-level handle for the application's API.
api :: Proxy AppAPI
api = Proxy
| yemi/fabian-blog | server/Api.hs | bsd-3-clause | 3,294 | 0 | 15 | 1,026 | 859 | 449 | 410 | 71 | 3 |
module Data.HashPSQ
( -- * Type
HashPSQ
-- * Query
, null
, size
, member
, lookup
, findMin
-- * Construction
, empty
, singleton
-- * Insertion
, insert
-- * Delete/update
, delete
, alter
, alterMin
-- * Lists
, fromList
, toList
, keys
-- * Views
, insertView
, deleteView
, minView
-- * Traversal
, map
, fold'
) where
import Prelude hiding (foldr, lookup, map, null)
import Data.HashPSQ.Internal
| meiersi/psqueues-old | src/Data/HashPSQ.hs | bsd-3-clause | 577 | 0 | 5 | 252 | 100 | 69 | 31 | 24 | 0 |
{-# LANGUAGE OverloadedStrings #-}
module SearchRepos where
import qualified Github.Search as Github
import qualified Github.Data as Github
import Control.Monad (forM,forM_)
import Data.Maybe (fromMaybe)
import Data.List (intercalate)
import System.Environment (getArgs)
import Text.Printf (printf)
import Data.Time.Clock (getCurrentTime, UTCTime(..))
import Data.Time.LocalTime (utc,utcToLocalTime,localDay,localTimeOfDay,TimeOfDay(..))
import Data.Time.Calendar (toGregorian)
-- | Search GitHub for Haskell repositories created since a date (first CLI
-- argument, defaulting to today, formatted YYYY-MM-DD) and print each repo
-- followed by a total count.
main :: IO ()
main = do
  args <- getArgs
  date <- case args of
            (x:_) -> return x
            -- was `otherwise ->`: a lowercase name in a pattern is a fresh
            -- catch-all binding that shadows Prelude.otherwise, not a guard;
            -- a wildcard states the intent and silences the shadow warning
            _ -> today
  let query = "q=language%3Ahaskell created%3A>" ++ date ++ "&per_page=100"
  let auth = Nothing
  result <- Github.searchRepos' auth query
  case result of
    Left e -> putStrLn $ "Error: " ++ show e
    -- renamed the lambda binder (was `r`, shadowing the outer result `r`)
    Right r -> do forM_ (Github.searchReposRepos r) (\repo -> do
                    putStrLn $ formatRepo repo
                    putStrLn ""
                    )
                  putStrLn $ "Count: " ++ show n ++ " Haskell repos created since " ++ date
      where n = Github.searchReposTotalCount r
-- | Today's date (in UTC) formatted as YYYY-MM-DD.
today :: IO String
today = do
  now <- getCurrentTime
  let (year, month, dayOfMonth) = toGregorian (localDay (utcToLocalTime utc now))
  return (printf "%d-%02d-%02d" year month dayOfMonth)
-- | Render one repo as aligned "Label:  value" lines, one per field.
-- Missing descriptions render as empty; missing dates as "???".
formatRepo :: Github.Repo -> String
formatRepo r =
  let fields = [ ("Name", Github.repoName)
                ,("URL", Github.repoHtmlUrl)
                ,("Description", orEmpty . Github.repoDescription)
                ,("Created-At", formatMaybeDate . Github.repoCreatedAt)
                ,("Pushed-At", formatMaybeDate . Github.repoPushedAt)
               ]
  in intercalate "\n" $ map fmt fields
     where fmt (s,f) = fill 12 (s ++ ":") ++ " " ++ f r
           orEmpty = fromMaybe ""
           -- pad the label to a fixed 12-column gutter
           fill n s = s ++ replicate n' ' '
             where n' = max 0 (n - length s)
           formatMaybeDate = maybe "???" formatDate
           formatDate = show . Github.fromGithubDate
| thoughtbot/github | samples/Search/SearchRepos.hs | bsd-3-clause | 1,996 | 0 | 18 | 529 | 618 | 328 | 290 | 47 | 3 |
module Web.Dom.Types where
import Data.Text (Text)
-- | Node kinds: 'El' and 'Tx' (presumably element vs. text node —
-- confirm at use sites).
data NodeType = El | Tx
-- | Type synonym for 'Text' which indicates a tag name.
type Tag = Text
-- | Type synonym for 'Text' which indicates a id.
type Id = Text
-- | Type synonym for 'Text' which indicates a class.
type Class = Text
-- | Type synonym for 'Text' which indicates an attribute name.
type Attr = Text
| tel/reactional | src/Web/Dom/Types.hs | bsd-3-clause | 390 | 0 | 5 | 93 | 55 | 37 | 18 | 7 | 0 |
{-# LANGUAGE OverloadedStrings #-}
import Test.Framework (defaultMain)
import Test.HTMLTests
import Property.HTMLProperties
import Property.ParserProperties
-- | All suites handed to test-framework's defaultMain.
tests = [ htmlTests
        , htmlProperties
        , parserProperties
        ]
main :: IO ()
main = defaultMain tests
| qnnguyen/howser | test/Spec.hs | bsd-3-clause | 281 | 0 | 6 | 57 | 59 | 34 | 25 | 10 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module IrcScanner.EditKeywordsPage where
import IrcScanner.Types as TY
import Snap
import Snap.Snaplet.Heist
import Control.Lens
import Control.Monad.IO.Class(liftIO)
--import Control.Monad.Trans(lift)
import Control.Monad.Reader(ask,lift)
import Data.IORef(readIORef)
import Heist.Interpreted
import Heist
import Data.Map.Syntax((##))
import Data.Text as T(Text,lines,pack,append)
-- import Control.Monad.Trans.Either (runEitherT,left, EitherT(..))
-- import Control.Monad.Trans (lift)
-- import Data.ByteString(ByteString)
import Data.Text.Encoding(decodeUtf8,encodeUtf8)
import IrcScanner.SnapUtil
import IrcScanner.KeywordRulesParser
import IrcScanner.Index(tryIndex,addIndex,deleteAllIndexes,updateIState)
-- | Splices for the editor page: the current rules-file text plus an
-- initially empty results pane.
allSplices :: Text -> Splices (SnapletISplice x)
allSplices rfc =
  do
    "rulesFileContents" ## textSplice rfc
    "results" ## textSplice ""
-- | GET handler: render the editor pre-filled with the rules file stored
-- in the snaplet's IORef'd state.
editKeywordsHandler :: HasHeist x => Handler x IrcSnaplet ()
editKeywordsHandler = do
  s <- ask
  st <- liftIO $ readIORef (view (iconfig . cstate) s)
  renderWithSplices "edit_keywords" (allSplices (_skwFileContents st))
-- | Dry-run handler: parse the submitted rules text and render either the
-- error list or the resulting indexes, without persisting anything.
editKeywordsTestHandler :: Handler IrcSnaplet IrcSnaplet ()
editKeywordsTestHandler = handleETHandler $ do
  rulesFileContents <- getParamET "rulesFileContents" >>= (return . decodeUtf8)
  results <- return $ parseKwFile $ T.lines rulesFileContents
  lift $ logError $ encodeUtf8 $ "result is " `append` (pack (show results))
  case results of
    Left errors -> lift $ renderWithSplices "edit_keywords_errors" $ kwErrorsSplices errors
    Right indexes -> lift $ renderWithSplices "edit_keywords_indexes" $ kwIndexesSplices indexes
-- | Save handler: same parse as the dry run, but on success it (1) writes
-- the rules file, (2) replaces all indexes, and (3) stores the new text in
-- the snaplet state before rendering the confirmation page.
editKeywordsSaveHandler :: Handler IrcSnaplet IrcSnaplet ()
editKeywordsSaveHandler = handleETHandler $ do
  rulesFileContents <- getParamET "rulesFileContents" >>= (return . decodeUtf8)
  results <- return $ parseKwFile $ T.lines rulesFileContents
  lift $ logError $ encodeUtf8 $ "result is " `append` (pack (show results))
  case results of
    Left errors -> lift $ renderWithSplices "edit_keywords_errors" $ kwErrorsSplices errors
    Right indexes -> lift $ do
      runIST' $
        do
          c <- ask
          lift $ saveKwFile rulesFileContents c
      _ <- runIST' $ deleteAllIndexes >> mapM addIndex indexes
      runIST' $ updateIState (\s -> (s { _skwFileContents = rulesFileContents },()))
      render "edit_keywords_save_results"
-- | Render parse errors: pair each line with its 1-based line number and
-- drop blank entries (empty Text means "no error on this line").
kwErrorsSplices :: [Text] -> Splices (SnapletISplice IrcSnaplet)
kwErrorsSplices lns =
  do
    "errors" ## mapSplices kwErrorSplice $ filter (\(l,_) -> l /= "") $
      Prelude.zip lns [1..]
-- | One error row: message text and line number.
kwErrorSplice :: Monad x => (Text, Int) -> Splice x
kwErrorSplice (ln, i) =
  do
    runChildrenWithText $
      do
        "errorText" ## ln
        "errorLine" ## (pack (show i))
-- | Render the successfully parsed indexes.
kwIndexesSplices :: [TY.Index] -> Splices (SnapletISplice IrcSnaplet)
kwIndexesSplices indexes =
  do
    "indexes" ## mapSplices kwIndexSplice indexes
-- | One index row: display name plus how many ranges it currently matches
-- (computed by running the index against the live state via tryIndex).
kwIndexSplice :: TY.Index -> SnapletISplice IrcSnaplet
kwIndexSplice ind =
  do
    ranges <- runIST $ tryIndex ind
    runChildrenWithText $
      do
        "displayName" ## (_idisplayName ind)
        "matchCount" ## (pack (show (length ranges)))
-- filter (\l -> l /= "") $ mapInd doit lns
-- where
-- doit :: Text -> Int -> Text
-- doit "" _ = ""
-- doit t i = (show i) `append` ": " `append` t
| redfish64/IrcScanner | src/IrcScanner/EditKeywordsPage.hs | bsd-3-clause | 3,433 | 0 | 20 | 684 | 936 | 480 | 456 | 74 | 2 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE TypeSynonymInstances #-}
module DirectoryServer where
import Control.Monad.Trans.Except
import Control.Monad.Trans.Resource
import Control.Monad.IO.Class
import Data.Aeson
import Data.Aeson.TH
import Data.Bson.Generic
import GHC.Generics
import Network.Wai hiding(Response)
import Network.Wai.Handler.Warp
import Network.Wai.Logger
import Servant
import Servant.API
import Servant.Client
import System.IO
import System.Directory
import System.Environment (getArgs, getProgName, lookupEnv)
import System.Log.Formatter
import System.Log.Handler (setFormatter)
import System.Log.Handler.Simple
import System.Log.Handler.Syslog
import System.Log.Logger
import Data.Bson.Generic
import qualified Data.List as DL
import Data.Maybe (catMaybes)
import Data.Text (pack, unpack)
import Data.Time.Clock (UTCTime, getCurrentTime)
import Data.Time.Format (defaultTimeLocale, formatTime)
import Database.MongoDB
import Control.Monad (when)
import Network.HTTP.Client (newManager, defaultManagerSettings)
--manager = newManager defaultManagerSettings
-- | A file's name and full contents, as shipped over the wire.
data File = File {
              fileName :: FilePath,
              fileContent :: String
            } deriving (Eq, Show, Generic)
instance ToJSON File
instance FromJSON File
-- | Simple string acknowledgement returned by mutation endpoints.
data Response = Response{
    response :: String
  } deriving (Eq, Show, Generic)
instance ToJSON Response
instance FromJSON Response
-- | A registered file server: identifier plus host/port (port kept as a
-- String and 'read' where needed).
data FileServer = FileServer{
    id :: String,
    fsaddress :: String,
    fsport :: String
  } deriving (Eq, Show, Generic)
instance ToJSON FileServer
instance FromJSON FileServer
instance ToBSON FileServer
instance FromBSON FileServer
-- | Which server (address/port) holds a given file name.
data FileMapping = FileMapping{
    fmfileName :: String,
    fmaddress :: String,
    fmport :: String
  } deriving (Eq, Show, Generic)
instance ToJSON FileMapping
instance FromJSON FileMapping
instance ToBSON FileMapping
instance FromBSON FileMapping
-- | Handler monad used by the servant endpoints below.
type ApiHandler = ExceptT ServantErr IO
serverport :: String
serverport = "7008"
serverhost :: String
serverhost = "localhost"
-- | The directory service's own API: register a file server, open (fetch)
-- a file by name, and close (write back) a file.
type DirectoryApi =
  "join" :> ReqBody '[JSON] FileServer :> Post '[JSON] Response :<|>
  "open" :> Capture "fileName" String :> Get '[JSON] File :<|>
  "close" :> ReqBody '[JSON] File :> Post '[JSON] Response
-- | The API each file server exposes; we act as its client.
type FileApi =
  "files" :> Get '[JSON] [FilePath] :<|>
  "download" :> Capture "fileName" String :> Get '[JSON] File :<|>
  "upload" :> ReqBody '[JSON] File :> Post '[JSON] Response -- :<|>
fileApi :: Proxy FileApi
fileApi = Proxy
-- Generated servant client functions for FileApi.
files:: ClientM [FilePath]
download :: String -> ClientM File
upload :: File -> ClientM Response
files :<|> download :<|> upload = client fileApi
-- | List the files held by a file server (thin wrapper over 'files').
getFilesQuery :: ClientM[FilePath]
getFilesQuery = do
  get_files <- files
  return(get_files)
-- | Download one file by name (thin wrapper over 'download').
downloadQuery :: String -> ClientM File
downloadQuery fname = do
  get_download <- download (fname)
  return(get_download)
directoryApi :: Proxy DirectoryApi
directoryApi = Proxy
-- | Wire the three DirectoryApi endpoints to their handlers.
-- (closeFile is not defined in this part of the file.)
server :: Server DirectoryApi
server =
  fsJoin :<|>
  DirectoryServer.openFile :<|>
  closeFile
directoryApp :: Application
directoryApp = serve directoryApi server
-- | Run the directory server on 'serverport'.
-- NOTE(review): 'read' on the port string is partial.
mkApp :: IO()
mkApp = do
  run (read (serverport) ::Int) directoryApp
-- | Upsert a file-server record keyed by its id.
storefs:: FileServer -> IO()
storefs fs@(FileServer key _ _) = liftIO $ do
  --warnLog $ "Storing file under key " ++ key ++ "."
  withMongoDbConnection $ upsert (select ["id" =: key] "FILESERVER_RECORD") $ toBSON fs
 -- return True
-- | Upsert a file-mapping record keyed by its file name.
-- NOTE(review): the selector field here is "id", but 'searchFileMappings'
-- queries "fmfileName" — these should agree or lookups will miss.
storefm :: FileMapping -> IO()
storefm fm@(FileMapping key _ _) = liftIO $ do
  warnLog $ "Storing file under key " ++ key ++ "."
  withMongoDbConnection $ upsert (select ["id" =: key] "FILEMAPPING_RECORD") $ toBSON fm
 -- return True
-- | Ask a freshly joined file server for its file listing and record a
-- 'FileMapping' (file name -> server address/port) for each file it holds.
getStoreFm :: FileServer -> IO()
getStoreFm fs = liftIO $ do
  manager <- newManager defaultManagerSettings
  -- NOTE(review): 'read (fsport fs)' is partial and crashes on a
  -- non-numeric port string.
  res <- runClientM getFilesQuery (ClientEnv manager (BaseUrl Http (fsaddress fs) (read(fsport fs)) ""))
  case res of
    Left err -> putStrLn $ "Error: " ++ show err
    -- The previous version applied pure 'map' to an IO action and passed
    -- the address/port directly instead of a FileMapping value, which did
    -- not type-check; run the inserts with mapM_ over proper records.
    Right paths -> mapM_ (\p -> storefm (FileMapping p (fsaddress fs) (fsport fs))) paths
-- | /join endpoint: persist the file server record, pull and store its
-- file listing, then acknowledge.
fsJoin :: FileServer -> ApiHandler Response
fsJoin fs = liftIO $ do
  storefs fs
  getStoreFm fs
  return (Response "Success")
-- | Look up which server holds the named file.
-- NOTE(review): this does not type-check as written — the signature claims
-- a pure 'Maybe FileMapping' while the body runs in IO (liftIO), and
-- @file <- DL.map ...@ monadically binds a pure list.  The signature
-- likely wants @String -> IO [Maybe FileMapping]@ (or similar) and the
-- bind should be a 'let'.
searchFileMappings :: String -> Maybe FileMapping
searchFileMappings key = liftIO $ do
  warnLog $ "Searching for value for key: " ++ key
  withMongoDbConnection $ do
    docs <- find (select ["fmfileName" =: key] "FILEMAPPING_RECORD") >>= drainCursor
    file <- DL.map (\ b -> fromBSON b :: Maybe FileMapping) docs
    return file
-- | Download the named file from the server recorded in the mapping.
-- NOTE(review): does not type-check as written — the signature claims a
-- pure 'File' but the body is an IO do-block, and the two case branches
-- have different types (IO () vs returning the File).  Probably wants
-- @String -> FileMapping -> IO (Maybe File)@ or to throw in ApiHandler.
openFileQuery :: String -> FileMapping -> File
openFileQuery key fm = do
  manager <- newManager defaultManagerSettings
  res <- runClientM (downloadQuery key) (ClientEnv manager (BaseUrl Http (fmaddress fm) (read(fmport fm)) ""))
  case res of
    Left err -> putStrLn $ "Error: " ++ show err
    Right response -> return response
-- | /open endpoint: resolve the file's location, then fetch it.
-- NOTE(review): inherits the type problems of the two helpers above; the
-- Nothing branch also returns IO () where a File (or servant error via
-- 'throwError'/'left') is required.
openFile :: String -> ApiHandler File
openFile key = do
  fm <- searchFileMappings key
  case fm of
    Nothing -> putStrLn $ "Error: " ++ "File not found"
    Just filemapping -> do
      file <- openFileQuery key filemapping
      return file
-- | Logging stuff
-- | Timestamps rendered in ISO-8601 with sub-second precision and zone.
iso8601 :: UTCTime -> String
iso8601 = formatTime defaultTimeLocale "%FT%T%q%z"
-- global loggin functions
debugLog, warnLog, errorLog :: String -> IO ()
debugLog = doLog debugM
warnLog = doLog warningM
errorLog = doLog errorM
-- NOTE(review): noticeLog is not covered by the signature above.
noticeLog = doLog noticeM
-- | Emit via hslogger under the program name, prefixing the timestamp.
doLog f s = getProgName >>= \ p -> do
  t <- getCurrentTime
  f p $ (iso8601 t) ++ " " ++ s
-- | Run an action with a stdout logger whose level comes from LOG_LEVEL.
withLogging act = withStdoutLogger $ \aplogger -> do
  lname <- getProgName
  llevel <- logLevel
  updateGlobalLogger lname
                     (setLevel $ case llevel of
                        "WARNING" -> WARNING
                        "ERROR"   -> ERROR
                        _         -> DEBUG)
  act aplogger
-- | Mongodb helpers...
-- | helper to open connection to mongo database and run action
-- generally run as follows:
--   withMongoDbConnection $ do ...
--
-- Opens a fresh pipe per call (no pooling), runs the action with master
-- write concern against the configured database, then closes the pipe.
withMongoDbConnection :: Action IO a -> IO a
withMongoDbConnection act = do
  ip <- mongoDbIp
  port <- mongoDbPort
  database <- mongoDbDatabase
  pipe <- connect (host ip)
  ret <- runResourceT $ liftIO $ access pipe master (pack database) act
  Database.MongoDB.close pipe
  return ret
-- | helper method to ensure we force extraction of all results
-- note how it is defined recursively - meaning that draincursor' calls itself.
-- the purpose is to iterate through all documents returned if the connection is
-- returning the documents in batch mode, meaning in batches of retruned results with more
-- to come on each call. The function recurses until there are no results left, building an
-- array of returned [Document]
drainCursor :: Cursor -> Action IO [Document]
drainCursor cur = drainCursor' cur []
  where
    drainCursor' cur res = do
      batch <- nextBatch cur
      if null batch
        then return res
        else drainCursor' cur (res ++ batch)
-- | Environment variable functions, that return the environment variable if set, or
-- default values if not set.
-- | The IP address of the mongoDB database that devnostics-rest uses to store and access data
mongoDbIp :: IO String
mongoDbIp = defEnv "MONGODB_IP" Prelude.id "database" True
-- | The port number of the mongoDB database that devnostics-rest uses to store and access data
mongoDbPort :: IO Integer
mongoDbPort = defEnv "MONGODB_PORT" read 27017 False -- 27017 is the default mongodb port
-- | The name of the mongoDB database that devnostics-rest uses to store and access data
mongoDbDatabase :: IO String
mongoDbDatabase = defEnv "MONGODB_DATABASE" Prelude.id "USEHASKELLDB" True
-- | Determines log reporting level. Set to "DEBUG", "WARNING" or "ERROR" as preferred. Loggin is
-- provided by the hslogger library.
logLevel :: IO String
logLevel = defEnv "LOG_LEVEL" Prelude.id "DEBUG" True
-- | Helper function to simplify the setting of environment variables
-- function that looks up environment variable and returns the result of running funtion fn over it
-- or if the environment variable does not exist, returns the value def. The function will optionally log a
-- warning based on Boolean tag
-- NOTE(review): callers passing 'read' as fn (mongoDbPort) make this
-- partial for malformed values.
defEnv :: Show a
       => String        -- Environment Variable name
       -> (String -> a) -- function to process variable string (set as 'id' if not needed)
       -> a             -- default value to use if environment variable is not set
       -> Bool          -- True if we should warn if environment variable is not set
       -> IO a
defEnv env fn def doWarn = lookupEnv env >>= \ e -> case e of
      Just s -> return $ fn s
      Nothing -> do
        when doWarn (doLog warningM $ "Environment variable: " ++ env ++
                     " is not set. Defaulting to " ++ (show def))
        return def
| Garygunn94/DFS | DirectoryServer/.stack-work/intero/intero234673Fi.hs | bsd-3-clause | 9,551 | 38 | 18 | 2,391 | 2,256 | 1,172 | 1,084 | 205 | 3 |
module Ten where
import SubsetSelection(getSubset)
import Histogram
import QaryGraphs
-- | Unwrap a 'Qary' digit to its underlying Int.
unQ :: Qary -> Int
unQ (Qary n) = n
-- | For each adjacent pair of elements, record whether the list is
-- non-decreasing at that step (True when the later element is >= the
-- earlier one). The result has one fewer element than the input;
-- empty and singleton lists yield [].
updown :: [Int] -> [Bool]
updown xs = zipWith (<=) xs (drop 1 xs)
-- | Code of the up/down pattern of xs, reduced modulo n.
-- NOTE(review): assumes 'getSubset' picks the elements of [1..] at the
-- positions where 'updown xs' is True — confirm against SubsetSelection.
f :: Int -> [Int] -> Int
f n xs = rem (sum . getSubset [1..] $ updown xs) n
-- | Sum of the elements, reduced modulo q.
g :: Int -> [Int] -> Int
g q xs = sum xs `rem` q
h q n xs = q * f n xs + g q xs
test q n = histogram . map (h q n . map unQ) $ allQStrings q n
maxtest q = maximum . test q | cullina/Extractor | src/Ten.hs | bsd-3-clause | 465 | 0 | 10 | 122 | 280 | 144 | 136 | 16 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module Tinc.ConfigSpec where
import Test.Hspec
import Test.Mockery.Directory
import Tinc.Config
-- | Hspec specification for 'Tinc.Config.getAdditionalDependencies'.
spec :: Spec
spec = do
  describe "getAdditionalDependencies" $ do
    it "return additional dependencies from tinc.yaml" $ do
      -- run inside a temporary directory so the real working tree is untouched
      inTempDirectory $ do
        writeFile "tinc.yaml" $ unlines [
            "dependencies:"
          , "  - foo"
          ]
        getAdditionalDependencies `shouldReturn` ["foo"]
    context "when tinc.yaml does not exist" $ do
      it "returns an empty list" $ do
        inTempDirectory $ do
          getAdditionalDependencies `shouldReturn` []
| robbinch/tinc | test/Tinc/ConfigSpec.hs | mit | 650 | 0 | 20 | 192 | 129 | 65 | 64 | 18 | 1 |
module Domain.Product where
import Import
import Domain.Types
-- | Persist a new product, returning the stored record. Stub: not yet implemented.
createProduct :: Product -> AppM(Product)
createProduct product = undefined
-- | Update an existing product, returning the updated record. Stub: not yet implemented.
editProduct :: Product -> AppM(Product)
editProduct product = undefined
-- | Fetch a product by its identifier. Stub: not yet implemented.
getProduct :: ProductID -> AppM(Product)
getProduct pid = undefined
-- | Criteria for narrowing product searches; fields are combined by 'filterProducts'.
data ProductFilter = ProductFilter { ids :: [ProductID]  -- match any of these ids
                                   , q :: Text           -- free-text search query
                                   , title :: Text       -- match on product title
                                   -- and more such filters can come here
                                   }
-- | List the products matching the given filter. Stub: not yet implemented.
filterProducts :: ProductFilter -> AppM([Product])
filterProducts pfilter = undefined
| vacationlabs/haskell-webapps | skeleton/src/Domain/Product.hs | mit | 635 | 0 | 9 | 214 | 140 | 79 | 61 | 14 | 1 |
-- https://www.codewars.com/kata/one-function-many-arguments-haskell-style
{-# LANGUAGE InstanceSigs #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE FunctionalDependencies #-}
module PolyvariadicFunctions where
-- | Polyvariadic functions: 'poly' turns a list-consuming function @[a] -> r@
-- into a function that may be applied to any number of @a@ arguments.
-- The functional dependency @t -> r@ lets the result type determine @r@.
class Poly a r t | t -> r where
  poly :: ([a] -> r) -> t
-- | Base case: no more arguments remain, so apply the function to [].
instance Poly a r r where
  poly :: ([a] -> r) -> r
  poly f = f []
-- | Inductive case: peel off one argument and cons it onto the eventual
-- argument list. The @a ~ a'@ equality guides type inference.
instance (a ~ a', Poly a r t) => Poly a' r (a -> t) where
  poly :: ([a] -> r) -> a -> t
  poly f x = poly f' where f' xs = f (x : xs)
-- | Variadic sum of Ints, eg @polyAdd 1 2 3 :: Int@.
polyAdd :: Poly Int Int t => t
polyAdd = poly (sum :: [Int] -> Int)
-- | Variadic 'unwords' over Strings.
polyWords :: Poly String String t => t
polyWords = poly unwords
-- | Collect any number of arguments into a list.
polyList :: Poly a [a] t => t
polyList = poly id
| airtial/Codegames | codewars/one-function-many-arguments-haskell-style.hs | gpl-2.0 | 767 | 0 | 10 | 161 | 277 | 150 | 127 | -1 | -1 |
--- * -*- outline-regexp:"--- \\*"; -*-
--- ** doc
-- In Emacs, use TAB on lines beginning with "-- *" to collapse/expand sections.
{-|
File reading/parsing utilities used by multiple readers, and a good
amount of the parsers for journal format, to avoid import cycles
when JournalReader imports other readers.
Some of these might belong in Hledger.Read.JournalReader or Hledger.Read.
-}
--- ** language
{-# LANGUAGE BangPatterns #-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE NoMonoLocalBinds #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE Rank2Types #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE TypeFamilies #-}
--- ** exports
module Hledger.Read.Common (
Reader (..),
InputOpts(..),
HasInputOpts(..),
definputopts,
rawOptsToInputOpts,
-- * parsing utilities
parseAndFinaliseJournal,
parseAndFinaliseJournal',
journalFinalise,
journalCheckAccountsDeclared,
journalCheckCommoditiesDeclared,
journalCheckPayeesDeclared,
journalAddForecast,
journalAddAutoPostings,
setYear,
getYear,
setDefaultCommodityAndStyle,
getDefaultCommodityAndStyle,
getDefaultAmountStyle,
getAmountStyle,
addDeclaredAccountTags,
addDeclaredAccountType,
pushParentAccount,
popParentAccount,
getParentAccount,
addAccountAlias,
getAccountAliases,
clearAccountAliases,
journalAddFile,
-- * parsers
-- ** transaction bits
statusp,
codep,
descriptionp,
-- ** dates
datep,
datetimep,
secondarydatep,
-- ** account names
modifiedaccountnamep,
accountnamep,
-- ** account aliases
accountaliasp,
-- ** amounts
spaceandamountormissingp,
amountp,
amountp',
mamountp',
amountpwithmultiplier,
commoditysymbolp,
priceamountp,
balanceassertionp,
lotpricep,
numberp,
fromRawNumber,
rawnumberp,
-- ** comments
isLineCommentStart,
isSameLineCommentStart,
multilinecommentp,
emptyorcommentlinep,
followingcommentp,
transactioncommentp,
postingcommentp,
-- ** bracketed dates
bracketeddatetagsp,
-- ** misc
noncommenttextp,
noncommenttext1p,
singlespacedtext1p,
singlespacednoncommenttext1p,
singlespacedtextsatisfying1p,
singlespacep,
skipNonNewlineSpaces,
skipNonNewlineSpaces1,
aliasesFromOpts,
-- * tests
tests_Common,
)
where
--- ** imports
import Control.Applicative.Permutations (runPermutation, toPermutationWithDefault)
import qualified Control.Monad.Fail as Fail (fail)
import Control.Monad.Except (ExceptT(..), liftEither, runExceptT, throwError)
import Control.Monad.State.Strict hiding (fail)
import Data.Bifunctor (bimap, second)
import Data.Char (digitToInt, isDigit, isSpace)
import Data.Decimal (DecimalRaw (Decimal), Decimal)
import Data.Either (lefts, rights)
import Data.Function ((&))
import Data.Functor ((<&>), ($>))
import Data.List (find, genericReplicate, union)
import Data.List.NonEmpty (NonEmpty(..))
import Data.Maybe (catMaybes, fromMaybe, isJust, listToMaybe)
import qualified Data.Map as M
import qualified Data.Semigroup as Sem
import Data.Text (Text)
import qualified Data.Text as T
import Data.Time.Calendar (Day, fromGregorianValid, toGregorian)
import Data.Time.Clock.POSIX (getPOSIXTime)
import Data.Time.LocalTime (LocalTime(..), TimeOfDay(..))
import Data.Word (Word8)
import Text.Megaparsec
import Text.Megaparsec.Char (char, char', digitChar, newline, string)
import Text.Megaparsec.Char.Lexer (decimal)
import Text.Megaparsec.Custom
(attachSource, customErrorBundlePretty, finalErrorBundlePretty, parseErrorAt, parseErrorAtRegion)
import Hledger.Data
import Hledger.Query (Query(..), filterQuery, parseQueryTerm, queryEndDate, queryStartDate, queryIsDate, simplifyQuery)
import Hledger.Reports.ReportOptions (ReportOpts(..), queryFromFlags, rawOptsToReportOpts)
import Hledger.Utils
import Text.Printf (printf)
import Hledger.Read.InputOptions
--- ** doctest setup
-- $setup
-- >>> :set -XOverloadedStrings
--- ** types
-- main types; a few more below
-- | A hledger journal reader is a triple of storage format name, a
-- detector of that format, and a parser from that format to Journal.
-- The type variable m appears here so that rParserr can hold a
-- journal parser, which depends on it.
data Reader m = Reader {
  -- | The canonical name of the format handled by this reader
  rFormat :: StorageFormat
  -- | The file extensions recognised as containing this format
  ,rExtensions :: [String]
  -- | The entry point for reading this format, accepting input options, file
  -- path for error messages and file contents, producing an exception-raising IO
  -- action that produces a journal or error message.
  ,rReadFn :: InputOpts -> FilePath -> Text -> ExceptT String IO Journal
  -- | The actual megaparsec parser called by the above, in case
  -- another parser (includedirectivep) wants to use it directly.
  ,rParser :: MonadIO m => ErroringJournalParser m ParsedJournal
  }
instance Show (Reader m) where show r = rFormat r ++ " reader"
-- | Parse an InputOpts from a RawOpts and a provided date.
-- This will fail with a usage error if the forecast period expression cannot be parsed.
rawOptsToInputOpts :: Day -> RawOpts -> InputOpts
rawOptsToInputOpts day rawopts =
  -- strict mode, or balanced-no-autoconversion checking, disables price inference
  let noinferprice = boolopt "strict" rawopts || stringopt "args" rawopts == "balancednoautoconversion"
      -- Do we really need to do all this work just to get the requested end date? This is duplicating
      -- much of reportOptsToSpec.
      ropts = rawOptsToReportOpts day rawopts
      argsquery = lefts . rights . map (parseQueryTerm day) $ querystring_ ropts
      datequery = simplifyQuery . filterQuery queryIsDate . And $ queryFromFlags ropts : argsquery
      commodity_styles = either err id $ commodityStyleFromRawOpts rawopts
        where err e = error' $ "could not parse commodity-style: '" ++ e ++ "'" -- PARTIAL:
  in InputOpts{
     -- files_ = listofstringopt "file" rawopts
      mformat_ = Nothing
     ,mrules_file_ = maybestringopt "rules-file" rawopts
     ,aliases_ = listofstringopt "alias" rawopts
     ,anon_ = boolopt "anon" rawopts
     ,new_ = boolopt "new" rawopts
     ,new_save_ = True
     ,pivot_ = stringopt "pivot" rawopts
     ,forecast_ = forecastPeriodFromRawOpts day rawopts
     ,reportspan_ = DateSpan (queryStartDate False datequery) (queryEndDate False datequery)
     ,auto_ = boolopt "auto" rawopts
     ,infer_equity_ = boolopt "infer-equity" rawopts && conversionop_ ropts /= Just ToCost
     ,balancingopts_ = defbalancingopts{
         ignore_assertions_ = boolopt "ignore-assertions" rawopts
       , infer_transaction_prices_ = not noinferprice
       , commodity_styles_ = Just commodity_styles
       }
     ,strict_ = boolopt "strict" rawopts
     ,_ioDay = day
     }
-- | Get the date span from --forecast's PERIODEXPR argument, if any.
-- This will fail with a usage error if the period expression cannot be parsed,
-- or if it contains a report interval.
forecastPeriodFromRawOpts :: Day -> RawOpts -> Maybe DateSpan
forecastPeriodFromRawOpts d rawopts = do
  arg <- maybestringopt "forecast" rawopts
  let period = parsePeriodExpr d . stripquotes $ T.pack arg
  -- an empty --forecast argument means "use the default forecast period"
  return $ if null arg then nulldatespan else either badParse (getSpan arg) period
  where
    badParse e = usageError $ "could not parse forecast period : "++customErrorBundlePretty e
    -- a report interval (eg "weekly") makes no sense here, so reject it
    getSpan arg (interval, requestedspan) = case interval of
      NoInterval -> requestedspan
      _ -> usageError $ "--forecast's argument should not contain a report interval ("
           ++ show interval ++ " in \"" ++ arg ++ "\")"
-- | Given the name of the option and the raw options, returns either
-- | * a map of successfully parsed commodity styles, if all options where successfully parsed
-- | * the first option which failed to parse, if one or more options failed to parse
commodityStyleFromRawOpts :: RawOpts -> Either String (M.Map CommoditySymbol AmountStyle)
commodityStyleFromRawOpts rawOpts =
  -- foldM short-circuits on the first Left, returning that unparsed option string
  foldM (\r -> fmap (\(c,a) -> M.insert c a r) . parseCommodity) mempty optList
  where
    optList = listofstringopt "commodity-style" rawOpts
    -- parse one --commodity-style value as an amount, keeping its commodity and style
    parseCommodity optStr = case amountp'' optStr of
      Left _ -> Left optStr
      Right (Amount acommodity _ astyle _) -> Right (acommodity, astyle)
-- | Given a parser to ParsedJournal, input options, file path and
-- content: run the parser on the content, and finalise the result to
-- get a Journal; or throw an error.
parseAndFinaliseJournal :: ErroringJournalParser IO ParsedJournal -> InputOpts
                           -> FilePath -> Text -> ExceptT String IO Journal
parseAndFinaliseJournal parser iopts f txt = do
  -- seed the parse state with the default year (from today's date) and this file
  let y = first3 . toGregorian $ _ioDay iopts
      initJournal = nulljournal{ jparsedefaultyear = Just y, jincludefilestack = [f] }
  eep <- liftIO $ runExceptT $ runParserT (evalStateT parser initJournal) f txt
  -- TODO: urgh.. clean this up somehow
  -- two error layers: a "final" error from the ExceptT, then ordinary parse errors
  case eep of
    Left finalParseError -> throwError $ finalErrorBundlePretty $ attachSource f txt finalParseError
    Right ep -> case ep of
      Left e -> throwError $ customErrorBundlePretty e
      Right pj -> journalFinalise iopts f txt pj
-- | Like parseAndFinaliseJournal but takes a (non-Erroring) JournalParser.
-- Also, applies command-line account aliases before finalising.
-- Used for timeclock/timedot.
-- TODO: get rid of this, use parseAndFinaliseJournal instead
parseAndFinaliseJournal' :: JournalParser IO ParsedJournal -> InputOpts
                            -> FilePath -> Text -> ExceptT String IO Journal
parseAndFinaliseJournal' parser iopts f txt = do
  -- seed the parse state with the default year (from today's date) and this file
  let y = first3 . toGregorian $ _ioDay iopts
      initJournal = nulljournal
        { jparsedefaultyear = Just y
        , jincludefilestack = [f] }
  ep <- liftIO $ runParserT (evalStateT parser initJournal) f txt
  -- see notes above
  case ep of
    Left e -> throwError $ customErrorBundlePretty e
    Right pj ->
      -- apply any command line account aliases. Can fail with a bad replacement pattern.
      case journalApplyAliases (aliasesFromOpts iopts) pj of
        Left e -> throwError e
        Right pj' -> journalFinalise iopts f txt pj'
-- | Post-process a Journal that has just been parsed or generated, in this order:
--
-- - add misc info (file path, read time)
--
-- - reverse transactions into their original parse order
--
-- - apply canonical commodity styles
--
-- - add tags from account directives to postings' tags
--
-- - add forecast transactions if enabled
--
-- - add tags from account directives to postings' tags (again to affect forecast transactions)
--
-- - add auto postings if enabled
--
-- - add tags from account directives to postings' tags (again to affect auto postings)
--
-- - evaluate balance assignments and balance each transaction
--
-- - check balance assertions if enabled
--
-- - infer equity postings in conversion transactions if enabled
--
-- - infer market prices from costs if enabled
--
-- - check all accounts have been declared if in strict mode
--
-- - check all commodities have been declared if in strict mode
--
journalFinalise :: InputOpts -> FilePath -> Text -> ParsedJournal -> ExceptT String IO Journal
journalFinalise iopts@InputOpts{auto_,infer_equity_,balancingopts_,strict_,_ioDay} f txt pj = do
  t <- liftIO getPOSIXTime
  -- NOTE: the steps below are order-sensitive; see the comment block above
  -- for the rationale of each stage.
  liftEither $ do
    j <- pj{jglobalcommoditystyles=fromMaybe mempty $ commodity_styles_ balancingopts_}
         &   journalSetLastReadTime t                       -- save the last read time
         &   journalAddFile (f, txt)                        -- save the main file's info
         &   journalReverse                                 -- convert all lists to the order they were parsed
         &   journalAddAccountTypes                         -- build a map of all known account types
         &   journalApplyCommodityStyles                    -- Infer and apply commodity styles - should be done early
         <&> journalAddForecast (forecastPeriod iopts pj)   -- Add forecast transactions if enabled
         <&> journalPostingsAddAccountTags                  -- Add account tags to postings, so they can be matched by auto postings.
         >>= (if auto_ && not (null $ jtxnmodifiers pj)
                then journalAddAutoPostings _ioDay balancingopts_  -- Add auto postings if enabled, and account tags if needed
                else pure)
         >>= journalBalanceTransactions balancingopts_      -- Balance all transactions and maybe check balance assertions.
         <&> (if infer_equity_ then journalAddInferredEquityPostings else id)  -- Add inferred equity postings, after balancing transactions and generating auto postings
         <&> journalInferMarketPricesFromTransactions       -- infer market prices from commodity-exchanging transactions
    when strict_ $ do
      journalCheckAccountsDeclared j                        -- If in strict mode, check all postings are to declared accounts
      journalCheckCommoditiesDeclared j                     -- and using declared commodities
    return j
-- | Apply any auto posting rules to generate extra postings on this journal's transactions.
-- | Apply any auto posting rules to generate extra postings on this
-- journal's transactions. Returns an error message on failure.
journalAddAutoPostings :: Day -> BalancingOpts -> Journal -> Either String Journal
journalAddAutoPostings d bopts =
    -- Balance all transactions without checking balance assertions,
    journalBalanceTransactions bopts{ignore_assertions_=True}
    -- then add the auto postings
    -- (Note adding auto postings after balancing means #893b fails;
    -- adding them before balancing probably means #893a, #928, #938 fail.)
    >=> journalModifyTransactions d
-- | Generate periodic transactions from all periodic transaction rules in the journal.
-- These transactions are added to the in-memory Journal (but not the on-disk file).
--
-- The start & end date for generated periodic transactions are determined in
-- a somewhat complicated way; see the hledger manual -> Periodic transactions.
journalAddForecast :: Maybe DateSpan -> Journal -> Journal
-- no forecast span requested: return the journal unchanged
journalAddForecast Nothing j = j
journalAddForecast (Just forecastspan) j = j{jtxns = jtxns j ++ forecasttxns}
  where
    -- expand each periodic rule over the span, keep only dates inside it,
    -- then apply the journal's commodity styles and finalise each transaction
    forecasttxns =
        map (txnTieKnot . transactionTransformPostings (postingApplyCommodityStyles $ journalCommodityStyles j))
      . filter (spanContainsDate forecastspan . tdate)
      . concatMap (`runPeriodicTransaction` forecastspan)
      $ jperiodictxns j
-- | Check that all the journal's transactions have payees declared with
-- payee directives, returning an error message otherwise.
journalCheckPayeesDeclared :: Journal -> Either String ()
journalCheckPayeesDeclared j = mapM_ checkpayee (jtxns j)
  where
    -- fail on the first transaction whose payee is not in the declared list,
    -- showing its source position and a rendering of the transaction
    checkpayee t
      | p `elem` ps = Right ()
      | otherwise = Left $
        printf "undeclared payee \"%s\"\nat: %s\n\n%s"
          (T.unpack p)
          (showSourcePosPair $ tsourcepos t)
          (linesPrepend2 "> " "  " . (<>"\n") . textChomp $ showTransaction t)
      where
        p = transactionPayee t
        ps = journalPayeesDeclared j
-- | Check that all the journal's postings are to accounts declared with
-- account directives, returning an error message otherwise.
journalCheckAccountsDeclared :: Journal -> Either String ()
journalCheckAccountsDeclared j = mapM_ checkacct (journalPostings j)
  where
    -- fail on the first posting to an undeclared account; include the
    -- enclosing transaction's source position when one is attached
    checkacct Posting{paccount,ptransaction}
      | paccount `elem` as = Right ()
      | otherwise = Left $
          (printf "undeclared account \"%s\"\n" (T.unpack paccount))
          ++ case ptransaction of
               Nothing -> ""
               Just t  -> printf "in transaction at: %s\n\n%s"
                 (showSourcePosPair $ tsourcepos t)
                 (linesPrepend "  " . (<>"\n") . textChomp $ showTransaction t)
      where
        as = journalAccountNamesDeclared j
-- | Check that all the commodities used in this journal's postings have been declared
-- by commodity directives, returning an error message otherwise.
journalCheckCommoditiesDeclared :: Journal -> Either String ()
journalCheckCommoditiesDeclared j =
  mapM_ checkcommodities (journalPostings j)
  where
    -- fail on the first posting amount (or balance assertion amount)
    -- whose commodity is not declared in the journal
    checkcommodities Posting{..} =
      case mfirstundeclaredcomm of
        Nothing -> Right ()
        Just c  -> Left $
          (printf "undeclared commodity \"%s\"\n" (T.unpack c))
          ++ case ptransaction of
               Nothing -> ""
               Just t  -> printf "in transaction at: %s\n\n%s"
                 (showSourcePosPair $ tsourcepos t)
                 (linesPrepend "  " . (<>"\n") . textChomp $ showTransaction t)
      where
        mfirstundeclaredcomm =
            find (`M.notMember` jcommodities j)
          . map acommodity
          -- also check the balance assertion's amount, if any
          . (maybe id ((:) . baamount) pbalanceassertion)
          . filter (not . isIgnorable)
          $ amountsRaw pamount

        -- Ignore missing amounts and zero amounts without commodity (#1767)
        isIgnorable a = (T.null (acommodity a) && amountIsZero a) || a == missingamt
-- | Remember the given year in the parse state, as the default year
-- for subsequent partial dates.
setYear :: Year -> JournalParser m ()
setYear y = modify' (\j -> j{jparsedefaultyear=Just y})

-- | The default year currently in effect for partial dates, if any.
getYear :: JournalParser m (Maybe Year)
getYear = fmap jparsedefaultyear get
-- | Get the decimal mark that has been specified for parsing, if any
-- (eg by the CSV decimal-mark rule, or possibly a future journal directive).
-- Return it as an AmountStyle that amount parsers can use.
-- | The decimal mark that has been specified for parsing, if any
-- (eg by a CSV decimal-mark rule), wrapped in a default 'AmountStyle'
-- so that amount parsers can consume it directly.
getDecimalMarkStyle :: JournalParser m (Maybe AmountStyle)
getDecimalMarkStyle = do
  j <- get
  pure $ fmap (\mark -> amountstyle{asdecimalpoint = Just mark}) (jparsedecimalmark j)
-- | Remember the given commodity and style as the default, set by a
-- D (default commodity) directive.
setDefaultCommodityAndStyle :: (CommoditySymbol,AmountStyle) -> JournalParser m ()
setDefaultCommodityAndStyle cs = modify' (\j -> j{jparsedefaultcommodity=Just cs})

-- | The default commodity and style currently in effect, if any.
getDefaultCommodityAndStyle :: JournalParser m (Maybe (CommoditySymbol,AmountStyle))
getDefaultCommodityAndStyle = jparsedefaultcommodity `fmap` get
-- | Get amount style associated with default currency.
--
-- Returns 'AmountStyle' used to defined by a latest default commodity directive
-- prior to current position within this file or its parents.
-- | Just the style component of the default commodity, if one is set.
getDefaultAmountStyle :: JournalParser m (Maybe AmountStyle)
getDefaultAmountStyle = fmap snd <$> getDefaultCommodityAndStyle
-- | Get the 'AmountStyle' declared by the most recently parsed (in the current or parent files,
-- prior to the current position) commodity directive for the given commodity, if any.
-- | The 'AmountStyle' declared by the most recently parsed commodity
-- directive for the given commodity, if any; otherwise the default
-- commodity's style, if any.
getAmountStyle :: CommoditySymbol -> JournalParser m (Maybe AmountStyle)
getAmountStyle commodity = do
  Journal{jcommodities} <- get
  mdefaultStyle <- fmap snd <$> getDefaultCommodityAndStyle
  -- a commodity-directive style takes precedence over the default style
  let mdeclaredStyle = cformat =<< M.lookup commodity jcommodities
  return $ maybe mdefaultStyle Just mdeclaredStyle
-- | Record tags declared for an account by an account directive.
-- Existing tags for the account are kept, with the new ones appended.
addDeclaredAccountTags :: AccountName -> [Tag] -> JournalParser m ()
addDeclaredAccountTags acct atags =
  modify' (\j -> j{jdeclaredaccounttags = M.insertWith (flip union) acct atags (jdeclaredaccounttags j)})
-- | Record the account type declared for an account by an account directive.
addDeclaredAccountType :: AccountName -> AccountType -> JournalParser m ()
addDeclaredAccountType acct atype =
  modify' (\j -> j{jdeclaredaccounttypes = M.insertWith (++) atype [acct] (jdeclaredaccounttypes j)})
-- | Enter an "apply account" block: push a parent account prefix.
pushParentAccount :: AccountName -> JournalParser m ()
pushParentAccount acct = modify' (\j -> j{jparseparentaccounts = acct : jparseparentaccounts j})
-- | Leave an "apply account" block; fails if there was no matching begin.
popParentAccount :: JournalParser m ()
popParentAccount = do
  j <- get
  case jparseparentaccounts j of
    []       -> unexpected (Tokens ('E' :| "nd of apply account block with no beginning"))
    (_:rest) -> put j{jparseparentaccounts=rest}
-- | The current full parent account prefix (innermost blocks were pushed
-- last, so the stack is reversed before joining).
getParentAccount :: JournalParser m AccountName
getParentAccount = fmap (concatAccountNames . reverse . jparseparentaccounts) get
-- | Record an account alias declared by an alias directive or --alias option.
addAccountAlias :: MonadState Journal m => AccountAlias -> m ()
addAccountAlias a = modify' (\(j@Journal{..}) -> j{jparsealiases=a:jparsealiases})
-- | All account aliases currently in effect.
getAccountAliases :: MonadState Journal m => m [AccountAlias]
getAccountAliases = fmap jparsealiases get
-- | Forget all account aliases (the "end aliases" directive).
clearAccountAliases :: MonadState Journal m => m ()
clearAccountAliases = modify' (\j -> j{jparsealiases=[]})
-- getTransactionCount :: MonadState Journal m => m Integer
-- getTransactionCount = fmap jparsetransactioncount get
--
-- setTransactionCount :: MonadState Journal m => Integer -> m ()
-- setTransactionCount i = modify' (\j -> j{jparsetransactioncount=i})
--
-- -- | Increment the transaction index by one and return the new value.
-- incrementTransactionCount :: MonadState Journal m => m Integer
-- incrementTransactionCount = do
-- modify' (\j -> j{jparsetransactioncount=jparsetransactioncount j + 1})
-- getTransactionCount
-- | Record a file path and its contents among the journal's source files.
journalAddFile :: (FilePath,Text) -> Journal -> Journal
journalAddFile f j@Journal{jfiles=fs} = j{jfiles=fs++[f]}
  -- append, unlike the other fields, even though we do a final reverse,
  -- to compensate for additional reversal due to including/monoid-concatting
-- A version of `match` that is strict in the returned text
-- | Like megaparsec's 'match', but strict in the consumed text.
match' :: TextParser m a -> TextParser m (Text, a)
match' parser = do
  (!consumed, result) <- match parser
  pure (consumed, result)
--- ** parsers
--- *** transaction bits
-- | Parse a transaction/posting status mark: * (cleared), ! (pending),
-- or nothing (unmarked). Never fails, thanks to the final alternative.
statusp :: TextParser m Status
statusp =
  choice'
    [ skipNonNewlineSpaces >> char '*' >> return Cleared
    , skipNonNewlineSpaces >> char '!' >> return Pending
    , return Unmarked
    ]

-- | Parse a transaction code: parenthesised text after the status mark,
-- eg a cheque number. Returns "" if there is none.
codep :: TextParser m Text
codep = option "" $ do
  try $ do
    skipNonNewlineSpaces1
    char '('
  code <- takeWhileP Nothing $ \c -> c /= ')' && c /= '\n'
  char ')' <?> "closing bracket ')' for transaction code"
  pure code
-- | Parse possibly empty text until a semicolon or newline.
-- Whitespace is preserved (for now - perhaps helps preserve alignment
-- of same-line comments ?).
-- | Parse a transaction description: possibly empty text until a
-- same-line comment or newline (whitespace preserved).
descriptionp :: TextParser m Text
descriptionp = noncommenttextp <?> "description"
--- *** dates
-- | Parse a date in YYYY-MM-DD format.
-- Slash (/) and period (.) are also allowed as separators.
-- The year may be omitted if a default year has been set.
-- Leading zeroes may be omitted.
-- | Parse a date in YYYY-MM-DD format (also / or . separators; year may
-- be omitted when a default year is in effect; leading zeroes optional).
datep :: JournalParser m Day
datep = do
  mYear <- getYear
  lift $ datep' mYear
-- | Parse a full or partial date, given an optional default year for
-- completing partial (month-day) dates. Reports well-formed-but-invalid
-- dates and mismatched separators with a source region.
datep' :: Maybe Year -> TextParser m Day
datep' mYear = do
    startOffset <- getOffset
    -- the first number is a year if it has >= 4 digits, otherwise a month
    d1 <- yearorintp <?> "year or month"
    sep <- datesepchar <?> "date separator"
    d2 <- decimal <?> "month or day"
    case d1 of
      Left y  -> fullDate startOffset y sep d2
      Right m -> partialDate startOffset mYear m sep d2
    <?> "full or partial date"
  where
    fullDate :: Int -> Year -> Char -> Month -> TextParser m Day
    fullDate startOffset year sep1 month = do
      sep2 <- satisfy isDateSepChar <?> "date separator"
      day <- decimal <?> "day"
      endOffset <- getOffset
      let dateStr = show year ++ [sep1] ++ show month ++ [sep2] ++ show day

      -- both separators must agree, eg 2024-01/02 is rejected
      when (sep1 /= sep2) $ customFailure $ parseErrorAtRegion startOffset endOffset $
        "invalid date: separators are different, should be the same"

      case fromGregorianValid year month day of
        Nothing -> customFailure $ parseErrorAtRegion startOffset endOffset $
                     "well-formed but invalid date: " ++ dateStr
        Just date -> pure $! date

    partialDate :: Int -> Maybe Year -> Month -> Char -> MonthDay -> TextParser m Day
    partialDate startOffset mYear month sep day = do
      endOffset <- getOffset
      case mYear of
        Just year ->
          case fromGregorianValid year month day of
            Nothing -> customFailure $ parseErrorAtRegion startOffset endOffset $
                        "well-formed but invalid date: " ++ dateStr
            Just date -> pure $! date
          where dateStr = show year ++ [sep] ++ show month ++ [sep] ++ show day

        -- a partial date without a default year in effect is an error
        Nothing -> customFailure $ parseErrorAtRegion startOffset endOffset $
          "partial date "++dateStr++" found, but the current year is unknown"
          where dateStr = show month ++ [sep] ++ show day

{-# INLINABLE datep' #-}
-- | Parse a date and time in YYYY-MM-DD HH:MM[:SS][+-ZZZZ] format.
-- Slash (/) and period (.) are also allowed as date separators.
-- The year may be omitted if a default year has been set.
-- Seconds are optional.
-- The timezone is optional and ignored (the time is always interpreted as a local time).
-- Leading zeroes may be omitted (except in a timezone).
-- | Parse a local date and time, using the default year from the parse
-- state (if any) for partial dates. See 'datetimep''.
datetimep :: JournalParser m LocalTime
datetimep = do
  mYear <- getYear
  lift $ datetimep' mYear
-- | Parse a date and time in YYYY-MM-DD HH:MM[:SS][+-ZZZZ] format,
-- given an optional default year for partial dates. Seconds default to 0.
-- A trailing timezone is parsed but discarded: the result is a LocalTime.
datetimep' :: Maybe Year -> TextParser m LocalTime
datetimep' mYear = do
  day <- datep' mYear
  skipNonNewlineSpaces1
  time <- timeOfDay
  optional timeZone -- ignoring time zones
  pure $ LocalTime day time

  where
    -- two-digit hour, minute and optional second, each range-checked
    -- with a precise error region
    timeOfDay :: TextParser m TimeOfDay
    timeOfDay = do
      off1 <- getOffset
      h' <- twoDigitDecimal <?> "hour"
      off2 <- getOffset
      unless (h' >= 0 && h' <= 23) $ customFailure $
        parseErrorAtRegion off1 off2 "invalid time (bad hour)"

      char ':' <?> "':' (hour-minute separator)"
      off3 <- getOffset
      m' <- twoDigitDecimal <?> "minute"
      off4 <- getOffset
      unless (m' >= 0 && m' <= 59) $ customFailure $
        parseErrorAtRegion off3 off4 "invalid time (bad minute)"

      s' <- option 0 $ do
        char ':' <?> "':' (minute-second separator)"
        off5 <- getOffset
        s' <- twoDigitDecimal <?> "second"
        off6 <- getOffset
        unless (s' >= 0 && s' <= 59) $ customFailure $
          parseErrorAtRegion off5 off6 "invalid time (bad second)"
          -- we do not support leap seconds
        pure s'

      pure $ TimeOfDay h' m' (fromIntegral s')

    twoDigitDecimal :: TextParser m Int
    twoDigitDecimal = do
      d1 <- digitToInt <$> digitChar
      d2 <- digitToInt <$> (digitChar <?> "a second digit")
      pure $ d1*10 + d2

    -- eg "+0100"; parsed only so it can be ignored above
    timeZone :: TextParser m String
    timeZone = do
      plusminus <- satisfy $ \c -> c == '-' || c == '+'
      fourDigits <- count 4 (digitChar <?> "a digit (for a time zone)")
      pure $ plusminus:fourDigits
-- | Parse a secondary date, introduced by '='; the primary date's year
-- is the default year for a partial secondary date.
secondarydatep :: Day -> TextParser m Day
secondarydatep primaryDate = char '=' *> datep' (Just primaryYear)
  where primaryYear = first3 $ toGregorian primaryDate

-- | Parse a year number or an Int. Years must contain at least four
-- digits.
yearorintp :: TextParser m (Either Year Int)
yearorintp = do
    yearOrMonth <- takeWhile1P (Just "digit") isDigit
    let n = readDecimal yearOrMonth
    return $ if T.length yearOrMonth >= 4 then Left n else Right (fromInteger n)
--- *** account names
-- | Parse an account name (plus one following space if present),
-- then apply any parent account prefix and/or account aliases currently in effect,
-- in that order. (Ie first add the parent account prefix, then rewrite with aliases).
-- This calls error if any account alias with an invalid regular expression exists.
-- | Parse an account name (plus one following space if present),
-- then apply any parent account prefix and/or account aliases currently in effect,
-- in that order. (Ie first add the parent account prefix, then rewrite with aliases).
-- This calls error if any account alias with an invalid regular expression exists.
modifiedaccountnamep :: JournalParser m AccountName
modifiedaccountnamep = do
  parent  <- getParentAccount
  aliases <- getAccountAliases
  -- off1    <- getOffset
  a       <- lift accountnamep
  -- off2    <- getOffset
  -- XXX or accountNameApplyAliasesMemo ? doesn't seem to make a difference (retest that function)
  case accountNameApplyAliases aliases $ joinAccountNames parent a of
    Right a' -> return $! a'
    -- should not happen, regexaliasp will have displayed a better error already:
    -- (XXX why does customFailure cause error to be displayed there, but not here ?)
    -- Left e  -> customFailure $! parseErrorAtRegion off1 off2 err
    Left e   -> error' err  -- PARTIAL:
      where
        err = "problem in account alias applied to "++T.unpack a++": "++e
-- | Parse an account name, plus one following space if present.
-- Account names have one or more parts separated by the account separator character,
-- and are terminated by two or more spaces (or end of input).
-- Each part is at least one character long, may have single spaces inside it,
-- and starts with a non-whitespace.
-- Note, this means "{account}", "%^!" and ";comment" are all accepted
-- (parent parsers usually prevent/consume the last).
-- It should have required parts to start with an alphanumeric;
-- for now it remains as-is for backwards compatibility.
-- | Parse an account name, plus one following space if present.
-- Account names have one or more parts separated by the account separator
-- character, and are terminated by two or more spaces (or end of input).
-- Each part is at least one character long, may have single spaces inside it,
-- and starts with a non-whitespace.
-- Note, this means "{account}", "%^!" and ";comment" are all accepted
-- (parent parsers usually prevent/consume the last).
-- It should have required parts to start with an alphanumeric;
-- for now it remains as-is for backwards compatibility.
accountnamep :: TextParser m AccountName
accountnamep = singlespacedtext1p

-- | Parse possibly empty text, including whitespace,
-- until a comment start (semicolon) or newline.
noncommenttextp :: TextParser m T.Text
noncommenttextp = takeWhileP Nothing (\c -> not $ isSameLineCommentStart c || isNewline c)

-- | Parse non-empty text, including whitespace,
-- until a comment start (semicolon) or newline.
noncommenttext1p :: TextParser m T.Text
noncommenttext1p = takeWhile1P Nothing (\c -> not $ isSameLineCommentStart c || isNewline c)

-- | Parse non-empty, single-spaced text starting and ending with non-whitespace,
-- until a double space or newline.
singlespacedtext1p :: TextParser m T.Text
singlespacedtext1p = singlespacedtextsatisfying1p (const True)

-- | Parse non-empty, single-spaced text starting and ending with non-whitespace,
-- until a comment start (semicolon), double space, or newline.
singlespacednoncommenttext1p :: TextParser m T.Text
singlespacednoncommenttext1p = singlespacedtextsatisfying1p (not . isSameLineCommentStart)
-- | Parse non-empty, single-spaced text starting and ending with non-whitespace,
-- where all characters satisfy the given predicate.
-- | Parse non-empty, single-spaced text starting and ending with
-- non-whitespace, where every character satisfies the given predicate.
-- Words are rejoined with single spaces.
singlespacedtextsatisfying1p :: (Char -> Bool) -> TextParser m T.Text
singlespacedtextsatisfying1p ok = do
  -- one word, then any number of (single space, word) pairs
  ws <- (:) <$> wordp <*> many (try (singlespacep *> wordp))
  pure $! T.unwords ws
  where
    wordp = takeWhile1P Nothing (\c -> ok c && not (isSpace c))
-- | Parse one non-newline whitespace character that is not followed by another one.
-- | Parse one non-newline whitespace character that is not followed by
-- another one (ie a single space, not a run of spaces).
singlespacep :: TextParser m ()
singlespacep = spacenonewline *> notFollowedBy spacenonewline
--- *** amounts
-- | Parse whitespace then an amount, with an optional left or right
-- currency symbol and optional price, or return the special
-- "missing" marker amount.
-- | Parse whitespace then an amount (with optional commodity symbol and
-- price); if that fails, backtrack and return the special "missing" amount.
spaceandamountormissingp :: JournalParser m MixedAmount
spaceandamountormissingp =
  option missingmixedamt $ try $ do
    lift $ skipNonNewlineSpaces1
    mixedAmount <$> amountp
-- | Parse a single-commodity amount, with optional symbol on the left
-- or right, followed by, in any order: an optional transaction price,
-- an optional ledger-style lot price, and/or an optional ledger-style
-- lot date. A lot price and lot date will be ignored.
--
-- To parse the amount's quantity (number) we need to know which character
-- represents a decimal mark. We find it in one of three ways:
--
-- 1. If a decimal mark has been set explicitly in the journal parse state,
-- we use that
--
-- 2. Or if the journal has a commodity declaration for the amount's commodity,
-- we get the decimal mark from that
--
-- 3. Otherwise we will parse any valid decimal mark appearing in the
-- number, as long as the number appears well formed.
--
-- Note 3 is the default zero-config case; it means we automatically handle
-- files with any supported decimal mark, but it also allows different decimal marks
-- in different amounts, which is a bit too loose. There's an open issue.
-- | Parse a single-commodity amount. See the comment block above for how
-- the decimal mark is determined.
amountp :: JournalParser m Amount
amountp = amountpwithmultiplier False

-- | Like 'amountp', but with a flag controlling whether a multiplier
-- prefix is allowed (used by auto posting rules). After the amount, a
-- transaction price, a ledger-style lot price, and a ledger-style lot
-- date may appear in any order; the lot price and lot date are ignored.
amountpwithmultiplier :: Bool -> JournalParser m Amount
amountpwithmultiplier mult = label "amount" $ do
  let spaces = lift $ skipNonNewlineSpaces
  amount <- amountwithoutpricep mult <* spaces
  (mprice, _elotprice, _elotdate) <- runPermutation $
    (,,) <$> toPermutationWithDefault Nothing (Just <$> priceamountp amount <* spaces)
         <*> toPermutationWithDefault Nothing (Just <$> lotpricep <* spaces)
         <*> toPermutationWithDefault Nothing (Just <$> lotdatep <* spaces)
  pure $ amount { aprice = mprice }

-- | Like 'amountp', but accepting only an optional transaction price
-- after the amount (no ledger-style lot prices or lot dates).
amountpnolotpricesp :: JournalParser m Amount
amountpnolotpricesp = label "amount" $ do
  let spaces = lift $ skipNonNewlineSpaces
  amount <- amountwithoutpricep False
  spaces
  mprice <- optional $ priceamountp amount <* spaces
  pure $ amount { aprice = mprice }
-- | Parse an amount with an optional left or right commodity symbol and
-- optional sign(s), but without any transaction price. When the Bool is
-- True (a multiplier amount in an automated posting rule), a default
-- commodity directive is not applied to a no-symbol amount (see the
-- Nothing branch below).
amountwithoutpricep :: Bool -> JournalParser m Amount
amountwithoutpricep mult = do
  sign <- lift signp
  leftsymbolamountp sign <|> rightornosymbolamountp sign
  where
  leftsymbolamountp :: (Decimal -> Decimal) -> JournalParser m Amount
  leftsymbolamountp sign = label "amount" $ do
    c <- lift commoditysymbolp
    mdecmarkStyle <- getDecimalMarkStyle
    mcommodityStyle <- getAmountStyle c
    -- XXX amounts of this commodity in periodic transaction rules and auto posting rules ? #1461
    let suggestedStyle = mdecmarkStyle <|> mcommodityStyle
    commodityspaced <- lift skipNonNewlineSpaces'
    -- a second sign may appear after the symbol (eg $-1); both signs are applied
    sign2 <- lift $ signp
    offBeforeNum <- getOffset
    ambiguousRawNum <- lift rawnumberp
    mExponent <- lift $ optional $ try exponentp
    offAfterNum <- getOffset
    let numRegion = (offBeforeNum, offAfterNum)
    (q,prec,mdec,mgrps) <- lift $ interpretNumber numRegion suggestedStyle ambiguousRawNum mExponent
    let s = amountstyle{ascommodityside=L, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
    return nullamt{acommodity=c, aquantity=sign (sign2 q), astyle=s, aprice=Nothing}
  rightornosymbolamountp :: (Decimal -> Decimal) -> JournalParser m Amount
  rightornosymbolamountp sign = label "amount" $ do
    offBeforeNum <- getOffset
    ambiguousRawNum <- lift rawnumberp
    mExponent <- lift $ optional $ try exponentp
    offAfterNum <- getOffset
    let numRegion = (offBeforeNum, offAfterNum)
    -- a following space + symbol means this was a right-symbol amount
    mSpaceAndCommodity <- lift $ optional $ try $ (,) <$> skipNonNewlineSpaces' <*> commoditysymbolp
    case mSpaceAndCommodity of
      -- right symbol amount
      Just (commodityspaced, c) -> do
        mdecmarkStyle <- getDecimalMarkStyle
        mcommodityStyle <- getAmountStyle c
        -- XXX amounts of this commodity in periodic transaction rules and auto posting rules ? #1461
        let msuggestedStyle = mdecmarkStyle <|> mcommodityStyle
        (q,prec,mdec,mgrps) <- lift $ interpretNumber numRegion msuggestedStyle ambiguousRawNum mExponent
        let s = amountstyle{ascommodityside=R, ascommodityspaced=commodityspaced, asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps}
        return nullamt{acommodity=c, aquantity=sign q, astyle=s, aprice=Nothing}
      -- no symbol amount
      Nothing -> do
        -- look for a number style to use when parsing, based on
        -- these things we've already parsed, in this order of preference:
        mdecmarkStyle <- getDecimalMarkStyle   -- a decimal-mark CSV rule
        mcommodityStyle <- getAmountStyle ""   -- a commodity directive for the no-symbol commodity
        mdefaultStyle <- getDefaultAmountStyle -- a D default commodity directive
        -- XXX no-symbol amounts in periodic transaction rules and auto posting rules ? #1461
        let msuggestedStyle = mdecmarkStyle <|> mcommodityStyle <|> mdefaultStyle
        (q,prec,mdec,mgrps) <- lift $ interpretNumber numRegion msuggestedStyle ambiguousRawNum mExponent
        -- if a default commodity has been set, apply it and its style to this amount
        -- (unless it's a multiplier in an automated posting)
        defcs <- getDefaultCommodityAndStyle
        let (c,s) = case (mult, defcs) of
              (False, Just (defc,defs)) -> (defc, defs{asprecision=max (asprecision defs) prec})
              _ -> ("", amountstyle{asprecision=prec, asdecimalpoint=mdec, asdigitgroups=mgrps})
        return nullamt{acommodity=c, aquantity=sign q, astyle=s, aprice=Nothing}
  -- For reducing code duplication. Doesn't parse anything. Has the type
  -- of a parser only in order to throw parse errors (for convenience).
  interpretNumber
    :: (Int, Int) -- offsets
    -> Maybe AmountStyle
    -> Either AmbiguousNumber RawNumber
    -> Maybe Integer
    -> TextParser m (Quantity, AmountPrecision, Maybe Char, Maybe DigitGroupStyle)
  interpretNumber posRegion msuggestedStyle ambiguousNum mExp =
    let rawNum = either (disambiguateNumber msuggestedStyle) id ambiguousNum
    in case fromRawNumber rawNum mExp of
      Left errMsg -> customFailure $
        uncurry parseErrorAtRegion posRegion errMsg
      Right (q,p,d,g) -> pure (q, Precision p, d, g)
-- | Try to parse an amount from a string, returning a parse error
-- bundle on failure.
amountp'' :: String -> Either (ParseErrorBundle Text CustomErr) Amount
amountp'' = runParser (evalStateT (amountp <* eof) nulljournal) "" . T.pack
-- | Parse an amount from a string, or get an error.
amountp' :: String -> Amount
amountp' = either (error' . show) id . amountp'' -- PARTIAL: XXX should throwError
-- | Parse a mixed amount from a string, or get an error.
mamountp' :: String -> MixedAmount
mamountp' s = mixedAmount (amountp' s)
-- | Parse a minus or plus sign followed by zero or more spaces,
-- or nothing, returning a function that negates or does nothing.
signp :: Num a => TextParser m (a -> a)
signp = option id $ (negate <$ char '-' <|> id <$ char '+') <* skipNonNewlineSpaces
-- | A commodity symbol, either double-quoted or plain.
commoditysymbolp :: TextParser m CommoditySymbol
commoditysymbolp =
  (quotedcommoditysymbolp <|> simplecommoditysymbolp) <?> "commodity symbol"
-- | A double-quoted commodity symbol: one or more characters other
-- than semicolon, newline, or double quote, between double quotes.
quotedcommoditysymbolp :: TextParser m CommoditySymbol
quotedcommoditysymbolp =
  between (char '"') (char '"') $ takeWhile1P Nothing (`notElem` (";\n\"" :: [Char]))
-- | An unquoted commodity symbol: one or more "simple" symbol characters.
simplecommoditysymbolp :: TextParser m CommoditySymbol
simplecommoditysymbolp = takeWhile1P Nothing (\c -> not (isNonsimpleCommodityChar c))
-- | Parse a transaction price: @ UNITPRICE or @@ TOTALPRICE, optionally
-- wrapped in parentheses ledger-style ((@), (@@)); parens are accepted
-- and otherwise ignored.
priceamountp :: Amount -> JournalParser m AmountPrice
priceamountp baseAmt = label "transaction price" $ do
  -- https://www.ledger-cli.org/3.0/doc/ledger3.html#Virtual-posting-costs
  parenthesised <- option False $ char '(' >> pure True
  char '@'
  totalPrice <- char '@' $> True <|> pure False
  when parenthesised $ void $ char ')'
  lift skipNonNewlineSpaces
  priceAmount <- amountwithoutpricep False -- <?> "unpriced amount (specifying a price)"
  -- a total price gets the sign of the base amount's quantity (treating 0 as positive)
  let amtsign' = signum $ aquantity baseAmt
      amtsign = if amtsign' == 0 then 1 else amtsign'
  pure $ if totalPrice
            then TotalPrice priceAmount{aquantity=amtsign * aquantity priceAmount}
            else UnitPrice priceAmount
-- | Parse a balance assertion or assignment: =AMOUNT, with an optional
-- second '=' marking it as total and an optional '*' marking it as
-- inclusive; records the source position.
balanceassertionp :: JournalParser m BalanceAssertion
balanceassertionp = do
  sourcepos <- getSourcePos
  char '='
  istotal <- fmap isJust $ optional $ try $ char '='
  isinclusive <- fmap isJust $ optional $ try $ char '*'
  lift skipNonNewlineSpaces
  -- this amount can have a price; balance assertions ignore it,
  -- but balance assignments will use it
  a <- amountpnolotpricesp <?> "amount (for a balance assertion or assignment)"
  return BalanceAssertion
    { baamount = a
    , batotal = istotal
    , bainclusive = isinclusive
    , baposition = sourcepos
    }
-- Parse a Ledger-style fixed {=UNITPRICE} or non-fixed {UNITPRICE}
-- or fixed {{=TOTALPRICE}} or non-fixed {{TOTALPRICE}} lot price,
-- and ignore it.
-- https://www.ledger-cli.org/3.0/doc/ledger3.html#Fixing-Lot-Prices .
lotpricep :: JournalParser m ()
lotpricep = label "ledger-style lot price" $ do
  char '{'
  doublebrace <- option False $ char '{' >> pure True
  -- an optional '=' (after optional spaces) marks the price as fixed; discarded
  _fixed <- fmap isJust $ optional $ lift skipNonNewlineSpaces >> char '='
  lift skipNonNewlineSpaces
  _a <- amountwithoutpricep False
  lift skipNonNewlineSpaces
  char '}'
  -- a double-brace opener requires a matching double-brace closer
  when (doublebrace) $ void $ char '}'
-- Parse a Ledger-style lot date [DATE], and ignore it.
-- https://www.ledger-cli.org/3.0/doc/ledger3.html#Fixing-Lot-Prices .
lotdatep :: JournalParser m ()
lotdatep = label "ledger-style lot date" $ do
  _ <- char '['
  lift skipNonNewlineSpaces
  _d <- datep
  lift skipNonNewlineSpaces
  _ <- char ']'
  pure ()
-- | Parse a string representation of a number for its value and display
-- attributes.
--
-- Some international number formats are accepted, eg either period or comma
-- may be used for the decimal mark, and the other of these may be used for
-- separating digit groups in the integer part. See
-- http://en.wikipedia.org/wiki/Decimal_separator for more examples.
--
-- This returns: the parsed numeric value, the precision (number of digits
-- seen following the decimal mark), the decimal mark character used if any,
-- and the digit group style if any.
--
numberp :: Maybe AmountStyle -> TextParser m (Quantity, Word8, Maybe Char, Maybe DigitGroupStyle)
numberp suggestedStyle = label "number" $ do
    -- a number is an optional sign followed by a sequence of digits possibly
    -- interspersed with periods, commas, or both
    -- dbgparse 0 "numberp"
    sign <- signp
    rawNum <- either (disambiguateNumber suggestedStyle) id <$> rawnumberp
    mExp <- optional $ try $ exponentp
    -- dbg7 appears to be debug tracing; the seq just forces it -- NOTE(review): confirm it is a no-op at normal debug levels
    dbg7 "numberp suggestedStyle" suggestedStyle `seq` return ()
    case dbg7 "numberp quantity,precision,mdecimalpoint,mgrps"
           $ fromRawNumber rawNum mExp of
      Left errMsg -> Fail.fail errMsg
      Right (q, p, d, g) -> pure (sign q, p, d, g)
-- | An exponent suffix: an 'e' (case-insensitive) followed by an
-- optionally signed decimal integer.
exponentp :: TextParser m Integer
exponentp = label "exponent" $ (char' 'e' *> signp) <*> decimal
-- | Interpret a raw number as a decimal number.
--
-- Returns:
-- - the decimal number
-- - the precision (number of digits after the decimal point)
-- - the decimal point character, if any
-- - the digit group style, if any (digit group character and sizes of digit groups)
fromRawNumber
  :: RawNumber
  -> Maybe Integer
  -> Either String
       (Quantity, Word8, Maybe Char, Maybe DigitGroupStyle)
-- digit group separators and exponents together are rejected outright
fromRawNumber (WithSeparators{}) (Just _) =
    Left "invalid number: digit separators and exponents may not be used together"
fromRawNumber raw mExp = do
    (quantity, precision) <- toQuantity (fromMaybe 0 mExp) (digitGroup raw) (decimalGroup raw)
    return (quantity, precision, mDecPt raw, digitGroupStyle raw)
  where
    toQuantity :: Integer -> DigitGrp -> DigitGrp -> Either String (Quantity, Word8)
    toQuantity e preDecimalGrp postDecimalGrp
      -- negative precision: the exponent shifted the point right past all
      -- decimal digits, so scale the mantissa up and report 0 decimal places
      | precision < 0 = Right (Decimal 0 (digitGrpNum * 10^(-precision)), 0)
      -- Decimal's exponent is a Word8, hence the 256 cap
      | precision < 256 = Right (Decimal precision8 digitGrpNum, precision8)
      | otherwise = Left "invalid number: numbers with more than 255 decimal places are currently not supported"
      where
        digitGrpNum = digitGroupNumber $ preDecimalGrp <> postDecimalGrp
        precision = toInteger (digitGroupLength postDecimalGrp) - e
        precision8 = fromIntegral precision :: Word8
    mDecPt (NoSeparators _ mDecimals) = fst <$> mDecimals
    mDecPt (WithSeparators _ _ mDecimals) = fst <$> mDecimals
    decimalGroup (NoSeparators _ mDecimals) = maybe mempty snd mDecimals
    decimalGroup (WithSeparators _ _ mDecimals) = maybe mempty snd mDecimals
    digitGroup (NoSeparators digitGrp _) = digitGrp
    digitGroup (WithSeparators _ digitGrps _) = mconcat digitGrps
    digitGroupStyle (NoSeparators _ _) = Nothing
    digitGroupStyle (WithSeparators sep grps _) = Just . DigitGroups sep $ groupSizes grps
    -- Outputs digit group sizes from least significant to most significant
    groupSizes :: [DigitGrp] -> [Word8]
    groupSizes digitGrps = reverse $ case map (fromIntegral . digitGroupLength) digitGrps of
      (a:b:cs) | a < b -> b:cs
      gs -> gs
-- | Resolve an ambiguous "DIGITS sep DIGITS" literal into a RawNumber,
-- using the suggested amount style when available.
disambiguateNumber :: Maybe AmountStyle -> AmbiguousNumber -> RawNumber
disambiguateNumber msuggestedStyle (AmbiguousNumber grp1 sep grp2) =
  -- If present, use the suggested style to disambiguate;
  -- otherwise, assume that the separator is a decimal point where possible.
  if isDecimalMark sep &&
     maybe True (sep `isValidDecimalBy`) msuggestedStyle
  then NoSeparators grp1 (Just (sep, grp2))
  else WithSeparators sep [grp1, grp2] Nothing
  where
    -- is this character an acceptable decimal mark under the given style?
    isValidDecimalBy :: Char -> AmountStyle -> Bool
    isValidDecimalBy c = \case
      AmountStyle{asdecimalpoint = Just d} -> d == c
      AmountStyle{asdigitgroups = Just (DigitGroups g _)} -> g /= c
      AmountStyle{asprecision = Precision 0} -> False
      _ -> True
-- | Parse and interpret the structure of a number without external hints.
-- Numbers are digit strings, possibly separated into digit groups by one
-- of two types of separators. (1) Numbers may optionally have a decimal
-- mark, which may be either a period or comma. (2) Numbers may
-- optionally contain digit group marks, which must all be either a
-- period, a comma, or a space.
--
-- It is our task to deduce the characters used as decimal mark and
-- digit group mark, based on the allowed syntax. For instance, we
-- make use of the fact that a decimal mark can occur at most once and
-- must be to the right of all digit group marks.
--
-- >>> parseTest rawnumberp "1,234,567.89"
-- Right (WithSeparators ',' ["1","234","567"] (Just ('.',"89")))
-- >>> parseTest rawnumberp "1,000"
-- Left (AmbiguousNumber "1" ',' "000")
-- >>> parseTest rawnumberp "1 000"
-- Right (WithSeparators ' ' ["1","000"] Nothing)
--
rawnumberp :: TextParser m (Either AmbiguousNumber RawNumber)
rawnumberp = label "number" $ do
  rawNumber <- fmap Right leadingDecimalPt <|> leadingDigits
  -- Guard against mistyped numbers
  mExtraDecimalSep <- optional $ lookAhead $ satisfy isDecimalMark
  when (isJust mExtraDecimalSep) $
    Fail.fail "invalid number (invalid use of separator)"
  mExtraFragment <- optional $ lookAhead $ try $
    char ' ' *> getOffset <* digitChar
  case mExtraFragment of
    Just off -> customFailure $
      parseErrorAt off "invalid number (excessive trailing digits)"
    Nothing -> pure ()
  return $ dbg7 "rawnumberp" rawNumber
  where
  -- a number that starts with a decimal mark, eg ".5"
  leadingDecimalPt :: TextParser m RawNumber
  leadingDecimalPt = do
    decPt <- satisfy isDecimalMark
    decGrp <- digitgroupp
    pure $ NoSeparators mempty (Just (decPt, decGrp))
  -- a number that starts with digits; may turn out separated, trailing-dot, or plain
  leadingDigits :: TextParser m (Either AmbiguousNumber RawNumber)
  leadingDigits = do
    grp1 <- digitgroupp
    withSeparators grp1 <|> fmap Right (trailingDecimalPt grp1)
      <|> pure (Right $ NoSeparators grp1 Nothing)
  -- digits followed by at least one separator char and more digits
  withSeparators :: DigitGrp -> TextParser m (Either AmbiguousNumber RawNumber)
  withSeparators grp1 = do
    (sep, grp2) <- try $ (,) <$> satisfy isDigitSeparatorChar <*> digitgroupp
    grps <- many $ try $ char sep *> digitgroupp
    let digitGroups = grp1 : grp2 : grps
    fmap Right (withDecimalPt sep digitGroups)
      <|> pure (withoutDecimalPt grp1 sep grp2 grps)
  -- separated digit groups followed by a decimal mark (which must differ
  -- from the group separator) and optional decimal digits
  withDecimalPt :: Char -> [DigitGrp] -> TextParser m RawNumber
  withDecimalPt digitSep digitGroups = do
    decPt <- satisfy $ \c -> isDecimalMark c && c /= digitSep
    decDigitGrp <- option mempty digitgroupp
    pure $ WithSeparators digitSep digitGroups (Just (decPt, decDigitGrp))
  -- a single decimal-mark-like separator with exactly two groups is ambiguous
  withoutDecimalPt
    :: DigitGrp
    -> Char
    -> DigitGrp
    -> [DigitGrp]
    -> Either AmbiguousNumber RawNumber
  withoutDecimalPt grp1 sep grp2 grps
    | null grps && isDecimalMark sep =
        Left $ AmbiguousNumber grp1 sep grp2
    | otherwise = Right $ WithSeparators sep (grp1:grp2:grps) Nothing
  -- digits followed by a trailing decimal mark, eg "1."
  trailingDecimalPt :: DigitGrp -> TextParser m RawNumber
  trailingDecimalPt grp1 = do
    decPt <- satisfy isDecimalMark
    pure $ NoSeparators grp1 (Just (decPt, mempty))
-- | May this character separate digit groups ? (a space or a
-- decimal-mark character).
isDigitSeparatorChar :: Char -> Bool
isDigitSeparatorChar c = c == ' ' || isDecimalMark c
-- | Some kinds of number literal we might parse.
data RawNumber
  = NoSeparators   DigitGrp (Maybe (Char, DigitGrp))
    -- ^ A number with no digit group marks (eg 100),
    --   or with a leading or trailing comma or period
    --   which (apparently) we interpret as a decimal mark (like 100. or .100)
  | WithSeparators Char [DigitGrp] (Maybe (Char, DigitGrp))
    -- ^ A number with identifiable digit group marks
    --   (eg 1,000,000 or 1,000.50 or 1 000)
  deriving (Show, Eq)
-- | Another kind of number literal: this one contains either a digit
-- group separator or a decimal mark, we're not sure which (eg 1,000 or 100.50).
-- Fields: digits before the separator, the separator, digits after it.
data AmbiguousNumber = AmbiguousNumber DigitGrp Char DigitGrp
  deriving (Show, Eq)
-- | Description of a single digit group in a number literal.
-- "Thousands" is one well known digit grouping, but there are others.
data DigitGrp = DigitGrp {
  digitGroupLength :: !Word,   -- ^ The number of digits in this group.
                               -- This is Word to avoid the need to do overflow
                               -- checking for the Semigroup instance of DigitGrp.
  digitGroupNumber :: !Integer -- ^ The natural number formed by this group's digits. This should always be positive.
} deriving (Eq)
-- | A custom show instance, showing digit groups as the parser saw them:
-- the group's digits, left-padded with zeros to its recorded length,
-- inside double quotes.
instance Show DigitGrp where
  show (DigitGrp len num) = "\"" ++ zeros ++ digits ++ "\""
    where
      digits = show num
      zeros = genericReplicate (toInteger len - toInteger (length digits)) '0'
-- Concatenating digit groups concatenates their digits: lengths add,
-- and the left group's number is shifted left by the right group's length.
instance Sem.Semigroup DigitGrp where
  DigitGrp l1 n1 <> DigitGrp l2 n2 = DigitGrp (l1 + l2) (n1 * 10^l2 + n2)
-- The empty digit group is the identity.
instance Monoid DigitGrp where
  mempty = DigitGrp 0 0
  mappend = (Sem.<>)
-- | Parse one or more digits as a 'DigitGrp', recording both the
-- digit count and the number the digits form.
digitgroupp :: TextParser m DigitGrp
digitgroupp = label "digits" $ toGroup <$> takeWhile1P (Just "digit") isDigit
  where
    toGroup = T.foldl' addDigit mempty
    addDigit (DigitGrp len acc) c = DigitGrp (len + 1) (acc * 10 + fromIntegral (digitToInt c))
--- *** comments
-- | Skip a multi-line comment: a "comment" line through the matching
-- "end comment" line (or end of input, which also terminates it).
multilinecommentp :: TextParser m ()
multilinecommentp = startComment *> anyLine `skipManyTill` endComment
  where
    startComment = string "comment" *> trailingSpaces
    -- eof also ends the comment block
    endComment = eof <|> string "end comment" *> trailingSpaces
    trailingSpaces = skipNonNewlineSpaces <* newline
    anyLine = void $ takeWhileP Nothing (/='\n') *> newline
{-# INLINABLE multilinecommentp #-}
-- | A blank or comment line in journal format: a line that's empty or
-- containing only whitespace or whose first non-whitespace character
-- is semicolon, hash, or star.
emptyorcommentlinep :: TextParser m ()
emptyorcommentlinep = do
  skipNonNewlineSpaces
  skiplinecommentp <|> void newline
  where
    -- skip from a line-comment start character to end of line
    -- (the newline itself is optional, for end of input)
    skiplinecommentp :: TextParser m ()
    skiplinecommentp = do
      satisfy isLineCommentStart
      void $ takeWhileP Nothing (/= '\n')
      optional newline
      pure ()
{-# INLINABLE emptyorcommentlinep #-}
-- | Is this a character that, as the first non-whitespace on a line,
-- starts a comment line ? (hash, star, or semicolon).
isLineCommentStart :: Char -> Bool
isLineCommentStart c = c `elem` ("#*;" :: [Char])
-- | Is this a character that, appearing anywhere within a line,
-- starts a comment ? (only semicolon).
isSameLineCommentStart :: Char -> Bool
isSameLineCommentStart = (== ';')
-- A parser combinator for parsing (possibly multiline) comments
-- following journal items.
--
-- Several journal items may be followed by comments, which begin with
-- semicolons and extend to the end of the line. Such comments may span
-- multiple lines, but comment lines below the journal item must be
-- preceded by leading whitespace.
--
-- This parser combinator accepts a parser that consumes all input up
-- until the next newline. This parser should extract the "content" from
-- comments. The resulting parser returns this content plus the raw text
-- of the comment itself.
--
-- See followingcommentp for tests.
--
followingcommentp' :: (Monoid a, Show a) => TextParser m a -> TextParser m (Text, a)
followingcommentp' contentp = do
  skipNonNewlineSpaces
  -- there can be 0 or 1 sameLine
  sameLine <- try headerp *> ((:[]) <$> match' contentp) <|> pure []
  _ <- eolof
  -- there can be 0 or more nextLines
  nextLines <- many $
    try (skipNonNewlineSpaces1 *> headerp) *> match' contentp <* eolof
  let
    -- if there's just a next-line comment, insert an empty same-line comment
    -- so the next-line comment doesn't get rendered as a same-line comment.
    sameLine' | null sameLine && not (null nextLines) = [("",mempty)]
              | otherwise = sameLine
    (texts, contents) = unzip $ sameLine' ++ nextLines
    strippedCommentText = T.unlines $ map T.strip texts
    commentContent = mconcat contents
  pure (strippedCommentText, commentContent)
  where
    -- a comment starts with a semicolon and optional spaces
    headerp = char ';' *> skipNonNewlineSpaces
{-# INLINABLE followingcommentp' #-}
-- | Parse the text of a (possibly multiline) comment following a journal item.
--
-- >>> rtp followingcommentp ""   -- no comment
-- Right ""
-- >>> rtp followingcommentp ";"    -- just a (empty) same-line comment. newline is added
-- Right "\n"
-- >>> rtp followingcommentp "; \n"
-- Right "\n"
-- >>> rtp followingcommentp ";\n ;\n"  -- a same-line and a next-line comment
-- Right "\n\n"
-- >>> rtp followingcommentp "\n ;\n"  -- just a next-line comment. Insert an empty same-line comment so the next-line comment doesn't become a same-line comment.
-- Right "\n\n"
--
followingcommentp :: TextParser m Text
followingcommentp =
  -- content extraction is a no-op here: only the raw comment text is wanted
  fst <$> followingcommentp' (void $ takeWhileP Nothing (/= '\n')) -- XXX support \r\n ?
{-# INLINABLE followingcommentp #-}
-- | Parse a transaction comment and extract its tags.
--
-- The first line of a transaction may be followed by comments, which
-- begin with semicolons and extend to the end of the line. Transaction
-- comments may span multiple lines, but comment lines below the
-- transaction must be preceded by leading whitespace.
--
-- 2000/1/1 ; a transaction comment starting on the same line ...
--   ; extending to the next line
--   account1  $1
--   account2
--
-- Tags are name-value pairs.
--
-- >>> let getTags (_,tags) = tags
-- >>> let parseTags = fmap getTags . rtp transactioncommentp
--
-- >>> parseTags "; name1: val1, name2:all this is value2"
-- Right [("name1","val1"),("name2","all this is value2")]
--
-- A tag's name must be immediately followed by a colon, without
-- separating whitespace. The corresponding value consists of all the text
-- following the colon up until the next colon or newline, stripped of
-- leading and trailing whitespace.
--
transactioncommentp :: TextParser m (Text, [Tag])
transactioncommentp = followingcommentp' commenttagsp
{-# INLINABLE transactioncommentp #-}
-- | Parse all tags (name:value pairs) in comment text.
commenttagsp :: TextParser m [Tag]
commenttagsp = do
  -- T.split always returns a non-empty list, so 'last' is safe here
  tagName <- (last . T.split isSpace) <$> takeWhileP Nothing (\c -> c /= ':' && c /= '\n')
  atColon tagName <|> pure [] -- if not ':', then either '\n' or EOF
  where
    -- just consumed ':'; an empty name means keep scanning, otherwise
    -- read the value and continue
    atColon :: Text -> TextParser m [Tag]
    atColon name = char ':' *> do
      if T.null name
        then commenttagsp
        else do
          skipNonNewlineSpaces
          val <- tagValue
          let tag = (name, val)
          (tag:) <$> commenttagsp
    -- a tag value runs to the next comma or newline, stripped
    tagValue :: TextParser m Text
    tagValue = do
      val <- T.strip <$> takeWhileP Nothing (\c -> c /= ',' && c /= '\n')
      _ <- optional $ char ','
      pure val
{-# INLINABLE commenttagsp #-}
-- | Parse a posting comment and extract its tags and dates.
--
-- Postings may be followed by comments, which begin with semicolons and
-- extend to the end of the line. Posting comments may span multiple
-- lines, but comment lines below the posting must be preceded by
-- leading whitespace.
--
-- 2000/1/1
--   account1  $1 ; a posting comment starting on the same line ...
--   ; extending to the next line
--
--   account2
--   ; a posting comment beginning on the next line
--
-- Tags are name-value pairs.
--
-- >>> let getTags (_,tags,_,_) = tags
-- >>> let parseTags = fmap getTags . rtp (postingcommentp Nothing)
--
-- >>> parseTags "; name1: val1, name2:all this is value2"
-- Right [("name1","val1"),("name2","all this is value2")]
--
-- A tag's name must be immediately followed by a colon, without
-- separating whitespace. The corresponding value consists of all the text
-- following the colon up until the next colon or newline, stripped of
-- leading and trailing whitespace.
--
-- Posting dates may be expressed with "date"/"date2" tags or with
-- bracketed date syntax. Posting dates will inherit their year from the
-- transaction date if the year is not specified. We throw parse errors on
-- invalid dates.
--
-- >>> let getDates (_,_,d1,d2) = (d1, d2)
-- >>> let parseDates = fmap getDates . rtp (postingcommentp (Just 2000))
--
-- >>> parseDates "; date: 1/2, date2: 1999/12/31"
-- Right (Just 2000-01-02,Just 1999-12-31)
-- >>> parseDates "; [1/2=1999/12/31]"
-- Right (Just 2000-01-02,Just 1999-12-31)
--
-- Example: tags, date tags, and bracketed dates
-- >>> rtp (postingcommentp (Just 2000)) "; a:b, date:3/4, [=5/6]"
-- Right ("a:b, date:3/4, [=5/6]\n",[("a","b"),("date","3/4")],Just 2000-03-04,Just 2000-05-06)
--
-- Example: extraction of dates from date tags ignores trailing text
-- >>> rtp (postingcommentp (Just 2000)) "; date:3/4=5/6"
-- Right ("date:3/4=5/6\n",[("date","3/4=5/6")],Just 2000-03-04,Nothing)
--
postingcommentp
  :: Maybe Year -> TextParser m (Text, [Tag], Maybe Day, Maybe Day)
postingcommentp mYear = do
  (commentText, (tags, dateTags)) <-
    followingcommentp' (commenttagsanddatesp mYear)
  -- the first "date"/"date2" date tag, if any, becomes the posting date(s)
  let mdate = snd <$> find ((=="date") .fst) dateTags
      mdate2 = snd <$> find ((=="date2").fst) dateTags
  pure (commentText, tags, mdate, mdate2)
{-# INLINABLE postingcommentp #-}
-- | Parse tags and date tags (both tag-style and bracketed) from
-- comment text, inheriting a missing year from the given one.
commenttagsanddatesp
  :: Maybe Year -> TextParser m ([Tag], [DateTag])
commenttagsanddatesp mYear = do
  (txt, dateTags) <- match $ readUpTo ':'
  -- next char is either ':' or '\n' (or EOF)
  -- T.split always returns a non-empty list, so 'last' is safe here
  let tagName = last (T.split isSpace txt)
  (fmap.second) (dateTags++) (atColon tagName) <|> pure ([], dateTags) -- if not ':', then either '\n' or EOF
  where
    -- scan up to the given terminator, collecting any bracketed date tags
    readUpTo :: Char -> TextParser m [DateTag]
    readUpTo end = do
      void $ takeWhileP Nothing (\c -> c /= end && c /= '\n' && c /= '[')
      -- if not '[' then ':' or '\n' or EOF
      atBracket (readUpTo end) <|> pure []
    atBracket :: TextParser m [DateTag] -> TextParser m [DateTag]
    atBracket cont = do
      -- Uses the fact that bracketed date-tags cannot contain newlines
      dateTags <- option [] $ lookAhead (bracketeddatetagsp mYear)
      _ <- char '['
      dateTags' <- cont
      pure $ dateTags ++ dateTags'
    -- just before a ':': decide between a date tag, an ordinary tag, or nothing
    atColon :: Text -> TextParser m ([Tag], [DateTag])
    atColon name = char ':' *> do
      skipNonNewlineSpaces
      (tags, dateTags) <- case name of
        ""      -> pure ([], [])
        "date"  -> dateValue name
        "date2" -> dateValue name
        _       -> tagValue name
      _ <- optional $ char ','
      bimap (tags++) (dateTags++) <$> commenttagsanddatesp mYear
    -- a date tag: parse the date, keep both the raw text and the parsed day
    dateValue :: Text -> TextParser m ([Tag], [DateTag])
    dateValue name = do
      (txt, (date, dateTags)) <- match' $ do
        date <- datep' mYear
        dateTags <- readUpTo ','
        pure (date, dateTags)
      let val = T.strip txt
      pure $ ( [(name, val)]
             , (name, date) : dateTags )
    -- an ordinary tag: value runs to the next comma/newline, stripped
    tagValue :: Text -> TextParser m ([Tag], [DateTag])
    tagValue name = do
      (txt, dateTags) <- match' $ readUpTo ','
      let val = T.strip txt
      pure $ ( [(name, val)]
             , dateTags )
{-# INLINABLE commenttagsanddatesp #-}
-- | Parse Ledger-style bracketed posting dates ([DATE=DATE2]), as
-- "date" and/or "date2" tags. Anything that looks like an attempt at
-- this (a square-bracketed sequence of 0123456789/-.= containing at
-- least one digit and one date separator) is also parsed, and will
-- throw an appropriate error.
--
-- The dates are parsed in full here so that errors are reported in
-- the right position. A missing year in DATE can be inferred if a
-- default date is provided. A missing year in DATE2 will be inferred
-- from DATE.
--
-- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[2016/1/2=3/4]"
-- Right [("date",2016-01-02),("date2",2016-03-04)]
--
-- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[1]"
-- Left ...not a bracketed date...
--
-- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[2016/1/32]"
-- Left ...1:2:...well-formed but invalid date: 2016/1/32...
--
-- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[1/31]"
-- Left ...1:2:...partial date 1/31 found, but the current year is unknown...
--
-- >>> either (Left . customErrorBundlePretty) Right $ rtp (bracketeddatetagsp Nothing) "[0123456789/-.=/-.=]"
-- Left ...1:13:...expecting month or day...
--
bracketeddatetagsp
  :: Maybe Year -> TextParser m [(TagName, Day)]
bracketeddatetagsp mYear1 = do
  -- dbgparse 0 "bracketeddatetagsp"
  -- cheap lookahead first: only commit if the bracketed text plausibly holds a date
  try $ do
    s <- lookAhead
       $ between (char '[') (char ']')
       $ takeWhile1P Nothing isBracketedDateChar
    unless (T.any isDigit s && T.any isDateSepChar s) $
      Fail.fail "not a bracketed date"
  -- Looks sufficiently like a bracketed date to commit to parsing a date
  between (char '[') (char ']') $ do
    md1 <- optional $ datep' mYear1
    -- DATE2's default year comes from DATE if present, else the given year
    let mYear2 = fmap readYear md1 <|> mYear1
    md2 <- optional $ char '=' *> datep' mYear2
    pure $ catMaybes [("date",) <$> md1, ("date2",) <$> md2]
  where
    readYear = first3 . toGregorian
    isBracketedDateChar c = isDigit c || isDateSepChar c || c == '='
{-# INLINABLE bracketeddatetagsp #-}
-- | Get the account name aliases from options, if any.
-- NOTE(review): 'fromparse' presumably errors out if an alias fails to
-- parse — confirm; the synthesized "--alias ..." source name is used
-- only for error reporting.
aliasesFromOpts :: InputOpts -> [AccountAlias]
aliasesFromOpts = map (\a -> fromparse $ runParser accountaliasp ("--alias "++quoteIfNeeded a) $ T.pack a)
                  . aliases_
-- | Parse an account alias: a regex alias or a basic alias.
-- Order matters: the regex form (starting with '/') is tried first.
accountaliasp :: TextParser m AccountAlias
accountaliasp = regexaliasp <|> basicaliasp
-- | Parse a basic account alias: OLD = NEW, where OLD runs up to the
-- '=' and NEW runs to end of line/input; both are right-stripped.
basicaliasp :: TextParser m AccountAlias
basicaliasp = do
  -- dbgparse 0 "basicaliasp"
  old <- some $ noneOf ("=" :: [Char])
  _ <- char '='
  skipNonNewlineSpaces
  new <- anySingle `manyTill` eolof -- eol in journal, eof in command lines, normally
  pure $ BasicAlias (T.pack $ rstrip old) (T.pack $ rstrip new)
-- | Parse a regex account alias: /REGEX/ = REPLACEMENT, with a
-- case-insensitive regex; a bad regex is reported at its source region.
regexaliasp :: TextParser m AccountAlias
regexaliasp = do
  -- dbgparse 0 "regexaliasp"
  char '/'
  off1 <- getOffset
  re <- some $ noneOf ("/\n\r" :: [Char]) -- paranoid: don't try to read past line end
  off2 <- getOffset
  char '/'
  skipNonNewlineSpaces
  char '='
  skipNonNewlineSpaces
  repl <- anySingle `manyTill` eolof
  case toRegexCI $ T.pack re of
    Right r -> return $! RegexAlias r repl
    Left e -> customFailure $! parseErrorAtRegion off1 off2 e
--- ** tests
-- Unit tests for the common amount/number parsers above.
tests_Common = testGroup "Common" [
  testGroup "amountp" [
    testCase "basic" $ assertParseEq amountp "$47.18" (usd 47.18)
   ,testCase "ends with decimal mark" $ assertParseEq amountp "$1." (usd 1 `withPrecision` Precision 0)
   ,testCase "unit price" $ assertParseEq amountp "$10 @ €0.5"
      -- not precise enough:
      -- (usd 10 `withPrecision` 0 `at` (eur 0.5 `withPrecision` 1)) -- `withStyle` asdecimalpoint=Just '.'
      amount{
        acommodity="$"
       ,aquantity=10 -- need to test internal precision with roundTo ? I think not
       ,astyle=amountstyle{asprecision=Precision 0, asdecimalpoint=Nothing}
       ,aprice=Just $ UnitPrice $
         amount{
           acommodity="€"
          ,aquantity=0.5
          ,astyle=amountstyle{asprecision=Precision 1, asdecimalpoint=Just '.'}
          }
       }
   ,testCase "total price" $ assertParseEq amountp "$10 @@ €5"
      amount{
        acommodity="$"
       ,aquantity=10
       ,astyle=amountstyle{asprecision=Precision 0, asdecimalpoint=Nothing}
       ,aprice=Just $ TotalPrice $
         amount{
           acommodity="€"
          ,aquantity=5
          ,astyle=amountstyle{asprecision=Precision 0, asdecimalpoint=Nothing}
          }
       }
   ,testCase "unit price, parenthesised" $ assertParse amountp "$10 (@) €0.5"
   ,testCase "total price, parenthesised" $ assertParse amountp "$10 (@@) €0.5"
   ]
  ,let p = lift (numberp Nothing) :: JournalParser IO (Quantity, Word8, Maybe Char, Maybe DigitGroupStyle) in
   testCase "numberp" $ do
     assertParseEq p "0" (0, 0, Nothing, Nothing)
     assertParseEq p "1" (1, 0, Nothing, Nothing)
     assertParseEq p "1.1" (1.1, 1, Just '.', Nothing)
     assertParseEq p "1,000.1" (1000.1, 1, Just '.', Just $ DigitGroups ',' [3])
     assertParseEq p "1.00.000,1" (100000.1, 1, Just ',', Just $ DigitGroups '.' [3,2])
     assertParseEq p "1,000,000" (1000000, 0, Nothing, Just $ DigitGroups ',' [3,3]) -- could be simplified to [3]
     assertParseEq p "1." (1, 0, Just '.', Nothing)
     assertParseEq p "1," (1, 0, Just ',', Nothing)
     assertParseEq p ".1" (0.1, 1, Just '.', Nothing)
     assertParseEq p ",1" (0.1, 1, Just ',', Nothing)
     assertParseError p "" ""
     assertParseError p "1,000.000,1" ""
     assertParseError p "1.000,000.1" ""
     assertParseError p "1,000.000.1" ""
     assertParseError p "1,,1" ""
     assertParseError p "1..1" ""
     assertParseError p ".1," ""
     assertParseError p ",1." ""
     -- 255 decimal places is the maximum supported precision (Word8)
     assertParseEq p "1.555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555" (1.555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555, 255, Just '.', Nothing)
     -- 256 decimal places is rejected
     assertParseError p "1.5555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555555" ""
  ,testGroup "spaceandamountormissingp" [
     testCase "space and amount" $ assertParseEq spaceandamountormissingp " $47.18" (mixedAmount $ usd 47.18)
    ,testCase "empty string" $ assertParseEq spaceandamountormissingp "" missingmixedamt
    -- ,testCase "just space" $ assertParseEq spaceandamountormissingp " " missingmixedamt  -- XXX should it ?
    -- ,testCase "just amount" $ assertParseError spaceandamountormissingp "$47.18" ""  -- succeeds, consuming nothing
    ]
  ]
| simonmichael/hledger | hledger-lib/Hledger/Read/Common.hs | gpl-3.0 | 66,485 | 1 | 26 | 14,050 | 13,143 | 6,869 | 6,274 | 929 | 6 |
--------------------------------------------------------------------------------
-- |
-- Module : Data.IDX
-- Copyright : Christof Schramm
-- License : GPL v 3
--
-- Maintainer : Christof Schramm <[email protected]>
-- Stability : Experimental
-- Portability : Should work in all common Haskell implementations
--
-- A package for reading and writing data in the IDX format.
-- This data format is used for machine-learning data sets like the
-- MNIST database of handwritten digits (<http://yann.lecun.com/exdb/mnist/>)
--------------------------------------------------------------------------------
module Data.IDX (
-- * Data types
IDXData
, IDXLabels
, IDXContentType(..)
-- * Accessing data
, idxType
, idxDimensions
, isIDXReal
, isIDXIntegral
-- * Raw data
, idxDoubleContent
, idxIntContent
-- * Labeled data
, labeledIntData
, labeledDoubleData
-- * IO / Serialization
-- ** IDXLabels
-- *** ByteString serialization
, encodeIDXLabels
, decodeIDXLabels
-- *** FileIO
, encodeIDXLabelsFile
, decodeIDXLabelsFile
-- ** IDXData (e.g. images)
-- *** ByteString serialization
, encodeIDX
, decodeIDX
-- *** File IO
, encodeIDXFile
, decodeIDXFile
)where
-- For compatibility with versions of base < 4.8
import Control.Applicative ((<$>))
import Control.Monad
import Data.Binary
import Data.ByteString.Lazy (ByteString)
import qualified Data.ByteString.Lazy as BL
import Data.IDX.Internal
import Data.Int
import Data.Traversable
import qualified Data.Vector.Unboxed as V
import Data.Vector.Unboxed ((!))
import Data.Word
-- | Partition a dataset and label each subpartition, return int values.
-- Yields 'Nothing' when the number of labels does not match the first
-- dimension of the data set.
labeledIntData :: IDXLabels -> IDXData -> Maybe [(Int, V.Vector Int)]
labeledIntData (IDXLabels lbls) dat
    | V.length lbls == nRows =
        Just [ (lbls ! row, V.slice (row * rowLen) rowLen body)
             | row <- [0 .. nRows - 1] ]
    | otherwise = Nothing
  where
    -- The first dimension counts the entries; the remaining dimensions
    -- determine the flattened length of each entry.
    nRows  = idxDimensions dat ! 0
    body   = idxIntContent dat
    rowLen = V.product (idxDimensions dat) `div` nRows
-- | Partition a dataset and label each subpartition, return double values.
-- Yields 'Nothing' when the number of labels does not match the first
-- dimension of the data set.
labeledDoubleData :: IDXLabels -> IDXData -> Maybe [(Int, V.Vector Double)]
labeledDoubleData (IDXLabels lbls) dat
    | V.length lbls == nRows =
        Just [ (lbls ! row, V.slice (row * rowLen) rowLen body)
             | row <- [0 .. nRows - 1] ]
    | otherwise = Nothing
  where
    -- The first dimension counts the entries; the remaining dimensions
    -- determine the flattened length of each entry.
    nRows  = idxDimensions dat ! 0
    body   = idxDoubleContent dat
    rowLen = V.product (idxDimensions dat) `div` nRows
-- | Read labels from a file, return 'Nothing' if decoding fails.
decodeIDXLabelsFile :: FilePath -> IO (Maybe IDXLabels)
-- '<$>' replaces the '>>= return . f' anti-pattern of the original.
decodeIDXLabelsFile path = decodeIDXLabels <$> BL.readFile path
-- | Decode labels from a lazy 'ByteString'; 'Nothing' on any parse failure.
decodeIDXLabels :: BL.ByteString -> Maybe IDXLabels
decodeIDXLabels bytes =
  either (const Nothing) (\(_, _, labels) -> Just labels) (decodeOrFail bytes)
-- | Write labels to the given file in binary IDX serialization.
-- (The previous comment said "Read data from a file" -- a copy-paste slip;
-- this function serializes to disk.)
encodeIDXLabelsFile :: IDXLabels -> FilePath -> IO ()
encodeIDXLabelsFile labs path = encodeFile path labs
-- | Serialize labels to a lazy 'ByteString' via their 'Binary' instance.
encodeIDXLabels :: IDXLabels -> BL.ByteString
encodeIDXLabels = encode
-- | Read IDX data from a file, return 'Nothing' if decoding fails.
decodeIDXFile :: FilePath -> IO (Maybe IDXData)
-- '<$>' replaces the '>>= return . f' anti-pattern of the original.
decodeIDXFile path = decodeIDX <$> BL.readFile path
-- | Decode IDX data from a lazy 'ByteString'; 'Nothing' on any parse failure.
decodeIDX :: BL.ByteString -> Maybe IDXData
decodeIDX bytes =
  either (const Nothing) (\(_, _, result) -> Just result) (decodeOrFail bytes)
-- | Write IDX data to the given file (binary IDX serialization).
encodeIDXFile :: IDXData -> FilePath -> IO ()
encodeIDXFile idx path = encodeFile path idx
-- | Serialize IDX data to a lazy 'ByteString' via its 'Binary' instance.
encodeIDX :: IDXData -> BL.ByteString
encodeIDX = encode
| kryoxide/mnist-idx | src/Data/IDX.hs | lgpl-3.0 | 4,250 | 0 | 13 | 1,300 | 832 | 465 | 367 | 73 | 2 |
{-# LANGUAGE OverloadedStrings #-}
module Api.Server where
import Api.Budgets
import Api.Error
import Api.Util
import Snap.Core
import Snap.Http.Server
-- | Entry point: read Snap server settings from the command line
-- (falling back to the defaults of 'emptyConfig') and start serving.
main :: IO ()
main = serve =<< commandLineConfig emptyConfig
-- | Start the HTTP server with the API routes; every handler is wrapped
-- in 'ensureAuthorised'.
serve :: Config Snap a -> IO ()
serve config = httpServe config $ route
  [ ("budgets", method POST $ ensureAuthorised createBudget)
  , ("budgets/:id", method GET $ ensureAuthorised getBudgetById)
    -- NOTE(review): "budgets/latest" is listed after the ":id" capture;
    -- Snap's 'route' is said to prefer literal components over captures,
    -- so this should still match -- confirm against the Snap routing docs.
  , ("budgets/latest", method GET $ ensureAuthorised getLatestBudget)
  ]
-- | Run the wrapped handler only when the request carries authorisation
-- credentials; otherwise intercept the response and answer unauthorized.
ensureAuthorised :: (String -> Snap ()) -> Snap ()
ensureAuthorised handler = extractAuthorization >>= dispatch
  where
    dispatch (Just credentials) = handler credentials
    dispatch Nothing            = do
      modifyResponse responseIntercept
      unAuthorized "No auth credentials"
| Geeroar/ut-haskell | src/Api/Server.hs | apache-2.0 | 794 | 0 | 12 | 201 | 227 | 114 | 113 | 22 | 2 |
{-# OPTIONS_HADDOCK hide #-}
--------------------------------------------------------------------------------
-- |
-- Module : Graphics.Rendering.OpenGL.GL.Texturing.TexParameter
-- Copyright : (c) Sven Panne 2002-2013
-- License : BSD3
--
-- Maintainer : Sven Panne <[email protected]>
-- Stability : stable
-- Portability : portable
--
-- This is a purely internal module for getting\/setting texture parameters.
--
--------------------------------------------------------------------------------
module Graphics.Rendering.OpenGL.GL.Texturing.TexParameter (
TexParameter(..), texParami, texParamf, texParamC4f, getTexParameteri
) where
import Foreign.Marshal.Alloc
import Foreign.Marshal.Utils
import Foreign.Ptr
import Foreign.Storable
import Graphics.Rendering.OpenGL.GL.PeekPoke
import Graphics.Rendering.OpenGL.GL.StateVar
import Graphics.Rendering.OpenGL.GL.Texturing.TextureTarget
import Graphics.Rendering.OpenGL.GL.VertexSpec
import Graphics.Rendering.OpenGL.Raw
--------------------------------------------------------------------------------
-- | Symbolic names for the texture parameters handled by the helpers
-- below; 'marshalTexParameter' maps them to raw 'GLenum' tokens.
data TexParameter =
    TextureMinFilter
  | TextureMagFilter
  | TextureWrapS
  | TextureWrapT
  | TextureWrapR
  | TextureBorderColor
  | TextureMinLOD
  | TextureMaxLOD
  | TextureBaseLevel
  | TextureMaxLevel
  | TexturePriority
  | TextureMaxAnisotropy
  | TextureCompare
  | TextureCompareOperator
  | TextureCompareFailValue
  | GenerateMipmap
  | TextureCompareMode
  | TextureCompareFunc
  | DepthTextureMode
  | TextureLODBias
  | TextureResident
-- | Translate a symbolic texture parameter into the raw OpenGL token.
-- NOTE(review): 'TextureCompare' and 'TextureCompareOperator' use literal
-- 0x819A/0x819B because the raw binding exports no symbolic name for them;
-- these look like the legacy SGIX texture-compare tokens -- confirm
-- against the GL_SGIX_shadow extension spec.
marshalTexParameter :: TexParameter -> GLenum
marshalTexParameter x = case x of
   TextureMinFilter -> gl_TEXTURE_MIN_FILTER
   TextureMagFilter -> gl_TEXTURE_MAG_FILTER
   TextureWrapS -> gl_TEXTURE_WRAP_S
   TextureWrapT -> gl_TEXTURE_WRAP_T
   TextureWrapR -> gl_TEXTURE_WRAP_R
   TextureBorderColor -> gl_TEXTURE_BORDER_COLOR
   TextureMinLOD -> gl_TEXTURE_MIN_LOD
   TextureMaxLOD -> gl_TEXTURE_MAX_LOD
   TextureBaseLevel -> gl_TEXTURE_BASE_LEVEL
   TextureMaxLevel -> gl_TEXTURE_MAX_LEVEL
   TexturePriority -> gl_TEXTURE_PRIORITY
   TextureMaxAnisotropy -> gl_TEXTURE_MAX_ANISOTROPY
   TextureCompare -> 0x819A
   TextureCompareOperator -> 0x819B
   TextureCompareFailValue -> gl_TEXTURE_COMPARE_FAIL_VALUE
   GenerateMipmap -> gl_GENERATE_MIPMAP
   TextureCompareMode -> gl_TEXTURE_COMPARE_MODE
   TextureCompareFunc -> gl_TEXTURE_COMPARE_FUNC
   DepthTextureMode -> gl_DEPTH_TEXTURE_MODE
   TextureLODBias -> gl_TEXTURE_LOD_BIAS
   TextureResident -> gl_TEXTURE_RESIDENT
--------------------------------------------------------------------------------
-- | Set a texture parameter: the marshalling action converts the
-- high-level value and hands the low-level representation to the given
-- raw @glTexParameter*@ entry point.
texParameter :: ParameterizedTextureTarget t
             => (GLenum -> GLenum -> b -> IO ())
             -> (a -> (b -> IO ()) -> IO ())
             -> t -> TexParameter -> a -> IO ()
texParameter setter marshalAct target param value =
   marshalAct value $ \lowLevel ->
      setter (marshalParameterizedTextureTarget target)
             (marshalTexParameter param)
             lowLevel
--------------------------------------------------------------------------------
-- | Query a texture parameter: allocate a temporary buffer, let the raw
-- @glGetTexParameter*@ entry point fill it, then read it back and convert
-- to the high-level representation.
getTexParameter :: (Storable b, ParameterizedTextureTarget t)
                => (GLenum -> GLenum -> Ptr b -> IO ())
                -> (b -> a)
                -> t -> TexParameter -> IO a
getTexParameter getter unmarshal target param = alloca $ \buf -> do
   getter (marshalParameterizedTextureTarget target)
          (marshalTexParameter param)
          buf
   peek1 unmarshal buf
--------------------------------------------------------------------------------
-- | Lift a pure marshalling function into the continuation-passing shape
-- expected by 'texParameter': apply it and feed the result onward.
m2a :: (a -> b) -> a -> (b -> IO ()) -> IO ()
m2a marshal x consume = consume (marshal x)
-- | A 'StateVar' for an integer-valued texture parameter, built from an
-- unmarshalling function (raw 'GLint' to high-level value) and its inverse.
texParami :: ParameterizedTextureTarget t =>
   (GLint -> a) -> (a -> GLint) -> TexParameter -> t -> StateVar a
texParami unmarshal marshal p t =
   makeStateVar
      (getTexParameter glGetTexParameteriv unmarshal t p)
      (texParameter glTexParameteri (m2a marshal) t p)
-- | A 'StateVar' for a float-valued texture parameter, built from an
-- unmarshalling function (raw 'GLfloat' to high-level value) and its inverse.
texParamf :: ParameterizedTextureTarget t =>
   (GLfloat -> a) -> (a -> GLfloat) -> TexParameter -> t -> StateVar a
texParamf unmarshal marshal p t =
   makeStateVar
      (getTexParameter glGetTexParameterfv unmarshal t p)
      (texParameter glTexParameterf (m2a marshal) t p)
-- | A 'StateVar' for a texture parameter holding an RGBA colour
-- (e.g. the border color); reads and writes all four components at once.
texParamC4f :: ParameterizedTextureTarget t => TexParameter -> t -> StateVar (Color4 GLfloat)
texParamC4f p t =
   makeStateVar
      (getTexParameter glGetTexParameterC4f id t p)
      (texParameter glTexParameterC4f with t p)
-- Adapt the raw float-vector setter to a 'Color4 GLfloat' pointer.
-- The 'castPtr' presumably relies on Color4's Storable layout being four
-- consecutive GLfloats -- confirm against the Storable instance.
glTexParameterC4f :: GLenum -> GLenum -> Ptr (Color4 GLfloat) -> IO ()
glTexParameterC4f target pname ptr = glTexParameterfv target pname (castPtr ptr)
-- Adapt the raw float-vector getter to a 'Color4 GLfloat' pointer;
-- same layout assumption as 'glTexParameterC4f'.
glGetTexParameterC4f :: GLenum -> GLenum -> Ptr (Color4 GLfloat) -> IO ()
glGetTexParameterC4f target pname ptr = glGetTexParameterfv target pname (castPtr ptr)
-- | One-shot query of an integer-valued texture parameter.
getTexParameteri :: ParameterizedTextureTarget t => (GLint -> a) -> t -> TexParameter -> IO a
getTexParameteri = getTexParameter glGetTexParameteriv
| mfpi/OpenGL | Graphics/Rendering/OpenGL/GL/Texturing/TexParameter.hs | bsd-3-clause | 4,868 | 0 | 14 | 841 | 1,044 | 556 | 488 | 97 | 21 |
{-
(c) The University of Glasgow 2006
(c) The GRASP/AQUA Project, Glasgow University, 1992-1998
\section[InstEnv]{Utilities for typechecking instance declarations}
The bits common to TcInstDcls and TcDeriv.
-}
{-# LANGUAGE CPP, DeriveDataTypeable #-}
module InstEnv (
DFunId, InstMatch, ClsInstLookupResult,
OverlapFlag(..), OverlapMode(..), setOverlapModeMaybe,
ClsInst(..), DFunInstType, pprInstance, pprInstanceHdr, pprInstances,
instanceHead, instanceSig, mkLocalInstance, mkImportedInstance,
instanceDFunId, tidyClsInstDFun, instanceRoughTcs,
fuzzyClsInstCmp, orphNamesOfClsInst,
InstEnvs(..), VisibleOrphanModules, InstEnv,
emptyInstEnv, extendInstEnv, deleteFromInstEnv, identicalClsInstHead,
extendInstEnvList, lookupUniqueInstEnv, lookupInstEnv, instEnvElts,
memberInstEnv, instIsVisible,
classInstances, instanceBindFun,
instanceCantMatch, roughMatchTcs,
isOverlappable, isOverlapping, isIncoherent
) where
#include "HsVersions.h"
import TcType -- InstEnv is really part of the type checker,
-- and depends on TcType in many ways
import CoreSyn ( IsOrphan(..), isOrphan, chooseOrphanAnchor )
import Module
import Class
import Var
import VarSet
import Name
import NameSet
import Unify
import Outputable
import ErrUtils
import BasicTypes
import UniqDFM
import Util
import Id
import Data.Data ( Data )
import Data.Maybe ( isJust, isNothing )
{-
************************************************************************
* *
ClsInst: the data type for type-class instances
* *
************************************************************************
-}
-- | A type-class instance. Note that there is some tricky laziness at work
-- here. See Note [ClsInst laziness and the rough-match fields] for more
-- details.
data ClsInst
  = ClsInst {   -- Used for "rough matching"; see
                -- Note [ClsInst laziness and the rough-match fields]
                -- INVARIANT: is_tcs = roughMatchTcs is_tys
               is_cls_nm :: Name          -- ^ Class name
             , is_tcs  :: [Maybe Name]    -- ^ Top of type args

               -- | @is_dfun_name = idName . is_dfun@.
               --
               -- We use 'is_dfun_name' for the visibility check,
               -- 'instIsVisible', which needs to know the 'Module' which the
               -- dictionary is defined in. However, we cannot use the 'Module'
               -- attached to 'is_dfun' since doing so would mean we would
               -- potentially pull in an entire interface file unnecessarily.
               -- This was the cause of #12367.
             , is_dfun_name :: Name

                 -- Used for "proper matching"; see Note [Proper-match fields]
             , is_tvs  :: [TyVar]       -- Fresh template tyvars for full match
                                        -- See Note [Template tyvars are fresh]
             , is_cls  :: Class         -- The real class
             , is_tys  :: [Type]        -- Full arg types (mentioning is_tvs)
                -- INVARIANT: is_dfun Id has type
                --      forall is_tvs. (...) => is_cls is_tys
                -- (modulo alpha conversion)

             , is_dfun :: DFunId -- See Note [Haddock assumptions]

             , is_flag :: OverlapFlag   -- See detailed comments with
                                        -- the decl of BasicTypes.OverlapFlag
             , is_orphan :: IsOrphan    -- Whether the instance is an orphan;
                                        -- see Note [When exactly is an
                                        -- instance decl an orphan?]
    }
  deriving Data
-- | A fuzzy comparison function for class instances, intended for sorting
-- instances before displaying them to the user.  Compares class names
-- first, then the rough-match type constructors left to right.
fuzzyClsInstCmp :: ClsInst -> ClsInst -> Ordering
fuzzyClsInstCmp lhs rhs =
    mconcat $ stableNameCmp (is_cls_nm lhs) (is_cls_nm rhs)
            : zipWith cmpRough (is_tcs lhs) (is_tcs rhs)
  where
    -- A type-variable slot (Nothing) sorts before any concrete tycon.
    cmpRough Nothing   Nothing   = EQ
    cmpRough Nothing   (Just _)  = LT
    cmpRough (Just _)  Nothing   = GT
    cmpRough (Just n1) (Just n2) = stableNameCmp n1 n2
-- | Predicates on an instance's overlap mode; see the rules in
-- Note [Rules for instance lookup] and the decl of BasicTypes.OverlapFlag.
isOverlappable, isOverlapping, isIncoherent :: ClsInst -> Bool
isOverlappable i = hasOverlappableFlag (overlapMode (is_flag i))
isOverlapping i = hasOverlappingFlag (overlapMode (is_flag i))
isIncoherent i = hasIncoherentFlag (overlapMode (is_flag i))
{-
Note [ClsInst laziness and the rough-match fields]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we load 'instance A.C B.T' from A.hi, but suppose that the type B.T is
otherwise unused in the program. Then it's stupid to load B.hi, the data type
declaration for B.T -- and perhaps further instance declarations!
We avoid this as follows:
* is_cls_nm, is_tcs, is_dfun_name are all Names. We can poke them to our heart's
content.
* Proper-match fields. is_dfun, and its related fields is_tvs, is_cls, is_tys
contain TyVars, Class, Type, Class etc, and so are all lazy thunks. When we
poke any of these fields we'll typecheck the DFunId declaration, and hence
pull in interfaces that it refers to. See Note [Proper-match fields].
* Rough-match fields. During instance lookup, we use the is_cls_nm :: Name and
is_tcs :: [Maybe Name] fields to perform a "rough match", *without* poking
inside the DFunId. The rough-match fields allow us to say "definitely does not
match", based only on Names.
This laziness is very important; see Trac #12367. Try hard to avoid pulling on
the structured fields unless you really need the instance.
* Another place to watch is InstEnv.instIsVisible, which needs the module to
which the ClsInst belongs. We can get this from is_dfun_name.
* In is_tcs,
Nothing means that this type arg is a type variable
(Just n) means that this type arg is a
TyConApp with a type constructor of n.
This is always a real tycon, never a synonym!
(Two different synonyms might match, but two
different real tycons can't.)
NB: newtypes are not transparent, though!
-}
{-
Note [Template tyvars are fresh]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The is_tvs field of a ClsInst has *completely fresh* tyvars.
That is, they are
* distinct from any other ClsInst
* distinct from any tyvars free in predicates that may
be looked up in the class instance environment
Reason for freshness: we use unification when checking for overlap
etc, and that requires the tyvars to be distinct.
The invariant is checked by the ASSERT in lookupInstEnv'.
Note [Proper-match fields]
~~~~~~~~~~~~~~~~~~~~~~~~~
The is_tvs, is_cls, is_tys fields are simply cached values, pulled
out (lazily) from the dfun id. They are cached here simply so
that we don't need to decompose the DFunId each time we want
to match it. The hope is that the rough-match fields mean
that we often never poke the proper-match fields.
However, note that:
* is_tvs must be a superset of the free vars of is_tys
* is_tvs, is_tys may be alpha-renamed compared to the ones in
the dfun Id
Note [Haddock assumptions]
~~~~~~~~~~~~~~~~~~~~~~~~~~
For normal user-written instances, Haddock relies on
* the SrcSpan of
* the Name of
* the is_dfun of
* an Instance
being equal to
* the SrcSpan of
* the instance head type of
* the InstDecl used to construct the Instance.
-}
-- | The dictionary-function Id of this instance.  Forcing it pulls in
-- the full types; see Note [ClsInst laziness and the rough-match fields].
instanceDFunId :: ClsInst -> DFunId
instanceDFunId = is_dfun
-- | Apply a tidying function to the instance's DFunId, leaving all
-- cached fields untouched.
tidyClsInstDFun :: (DFunId -> DFunId) -> ClsInst -> ClsInst
tidyClsInstDFun tidy_dfun ispec
  = ispec { is_dfun = tidy_dfun (is_dfun ispec) }
-- | The rough-match type constructors; 'Nothing' entries mark
-- type-variable positions.  Safe to force without pulling in interfaces.
instanceRoughTcs :: ClsInst -> [Maybe Name]
instanceRoughTcs = is_tcs
-- The dfun carries the instance's Name/SrcSpan; see Note [Haddock assumptions]
instance NamedThing ClsInst where
   getName ispec = getName (is_dfun ispec)

instance Outputable ClsInst where
   ppr = pprInstance
pprInstance :: ClsInst -> SDoc
-- Prints the ClsInst as an instance declaration, followed by its
-- definition site and, in debug style, its dfun
pprInstance ispec
  = hang (pprInstanceHdr ispec)
       2 (vcat [ text "--" <+> pprDefinedAt (getName ispec)
               , ifPprDebug (ppr (is_dfun ispec)) ])
-- * pprInstanceHdr is used in VStudio to populate the ClassView tree
pprInstanceHdr :: ClsInst -> SDoc
-- Prints the ClsInst as an instance declaration header: the overlap
-- pragma (if any) followed by the instance's sigma type
pprInstanceHdr (ClsInst { is_flag = flag, is_dfun = dfun })
  = text "instance" <+> ppr flag <+> pprSigmaType (idType dfun)
-- | Render a list of instances, one declaration per line.
pprInstances :: [ClsInst] -> SDoc
pprInstances = vcat . map pprInstance
instanceHead :: ClsInst -> ([TyVar], Class, [Type])
-- Returns the head, using the fresh tyvars from the ClsInst.
-- Note: the class is re-derived from the dfun's type rather than
-- read from the cached is_cls field.
instanceHead (ClsInst { is_tvs = tvs, is_tys = tys, is_dfun = dfun })
   = (tvs, cls, tys)
   where
     (_, _, cls, _) = tcSplitDFunTy (idType dfun)
-- | Collects the names of concrete types and type constructors that make
-- up the head of a class instance. For instance, given `class Foo a b`:
--
-- `instance Foo (Either (Maybe Int) a) Bool` would yield
--      [Either, Maybe, Int, Bool]
--
-- Used in the implementation of ":info" in GHCi.
--
-- The 'tcSplitSigmaTy' is because of
--      instance Foo a => Baz T where ...
-- The decl is an orphan if Baz and T are both not locally defined,
--      even if Foo *is* locally defined
orphNamesOfClsInst :: ClsInst -> NameSet
-- Only the rough-match-safe fields (is_cls_nm, is_tys) are consulted.
orphNamesOfClsInst (ClsInst { is_cls_nm = cls_nm, is_tys = tys })
  = orphNamesOfTypes tys `unionNameSet` unitNameSet cls_nm
instanceSig :: ClsInst -> ([TyVar], [Type], Class, [Type])
-- Decomposes the DFunId into (tyvars, theta, class, class arg types)
instanceSig ispec = tcSplitDFunTy (idType (is_dfun ispec))
mkLocalInstance :: DFunId -> OverlapFlag
                -> [TyVar] -> Class -> [Type]
                -> ClsInst
-- Used for local instances, where we can safely pull on the DFunId.
-- Consider using newClsInst instead; this will also warn if
-- the instance is an orphan.
mkLocalInstance dfun oflag tvs cls tys
  = ClsInst { is_flag = oflag, is_dfun = dfun
            , is_tvs = tvs
            , is_dfun_name = dfun_name
            , is_cls = cls, is_cls_nm = cls_name
            , is_tys = tys, is_tcs = roughMatchTcs tys
            , is_orphan = orph
            }
  where
    cls_name = className cls
    dfun_name = idName dfun
    -- The dfun must be an external name, or we cannot tell which
    -- module the instance lives in
    this_mod = ASSERT( isExternalName dfun_name ) nameModule dfun_name
    is_local name = nameIsLocalOrFrom this_mod name

        -- Compute orphanhood.  See Note [Orphans] in InstEnv
    (cls_tvs, fds) = classTvsFds cls
    -- For each head type, the locally-defined names it mentions
    arg_names = [filterNameSet is_local (orphNamesOfType ty) | ty <- tys]

    -- See Note [When exactly is an instance decl an orphan?]
    -- A locally-defined class makes the instance a non-orphan outright;
    -- otherwise every fundep must yield a local anchor name
    orph | is_local cls_name = NotOrphan (nameOccName cls_name)
         | all notOrphan mb_ns  = ASSERT( not (null mb_ns) ) head mb_ns
         | otherwise            = IsOrphan

    notOrphan NotOrphan{} = True
    notOrphan _ = False

    mb_ns :: [IsOrphan]    -- One for each fundep; a locally-defined name
                           -- that is not in the "determined" arguments
    mb_ns | null fds   = [choose_one arg_names]
          | otherwise  = map do_one fds

    -- Only the names from non-determined argument positions count
    do_one (_ltvs, rtvs) = choose_one [ns | (tv,ns) <- cls_tvs `zip` arg_names
                                          , not (tv `elem` rtvs)]

    choose_one nss = chooseOrphanAnchor (unionNameSets nss)
mkImportedInstance :: Name           -- ^ the name of the class
                   -> [Maybe Name]   -- ^ the types which the class was applied to
                   -> Name           -- ^ the 'Name' of the dictionary binding
                   -> DFunId         -- ^ the 'Id' of the dictionary.
                   -> OverlapFlag    -- ^ may this instance overlap?
                   -> IsOrphan       -- ^ is this instance an orphan?
                   -> ClsInst
-- Used for imported instances, where we get the rough-match stuff
-- from the interface file.  The rough-match fields (cls_nm, mb_tcs) are
-- supplied directly so that the dfun's type is not forced here; see
-- Note [ClsInst laziness and the rough-match fields].
-- The bound tyvars of the dfun are guaranteed fresh, because
-- the dfun has been typechecked out of the same interface file
mkImportedInstance cls_nm mb_tcs dfun_name dfun oflag orphan
  = ClsInst { is_flag = oflag, is_dfun = dfun
            , is_tvs = tvs, is_tys = tys
            , is_dfun_name = dfun_name
            , is_cls_nm = cls_nm, is_cls = cls, is_tcs = mb_tcs
            , is_orphan = orphan }
  where
    (tvs, _, cls, tys) = tcSplitDFunTy (idType dfun)
{-
Note [When exactly is an instance decl an orphan?]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
(see MkIface.instanceToIfaceInst, which implements this)
Roughly speaking, an instance is an orphan if its head (after the =>)
mentions nothing defined in this module.
Functional dependencies complicate the situation though. Consider
module M where { class C a b | a -> b }
and suppose we are compiling module X:
module X where
import M
data T = ...
instance C Int T where ...
This instance is an orphan, because when compiling a third module Y we
might get a constraint (C Int v), and we'd want to improve v to T. So
we must make sure X's instances are loaded, even if we do not directly
use anything from X.
More precisely, an instance is an orphan iff
If there are no fundeps, then at least of the names in
the instance head is locally defined.
If there are fundeps, then for every fundep, at least one of the
names free in a *non-determined* part of the instance head is
defined in this module.
(Note that these conditions hold trivially if the class is locally
defined.)
************************************************************************
* *
InstEnv, ClsInstEnv
* *
************************************************************************
A @ClsInstEnv@ all the instances of that class. The @Id@ inside a
ClsInstEnv mapping is the dfun for that instance.
If class C maps to a list containing the item ([a,b], [t1,t2,t3], dfun), then
forall a b, C t1 t2 t3 can be constructed by dfun
or, to put it another way, we have
instance (...) => C t1 t2 t3, witnessed by dfun
-}
---------------------------------------------------
{-
Note [InstEnv determinism]
~~~~~~~~~~~~~~~~~~~~~~~~~~
We turn InstEnvs into a list in some places that don't directly affect
the ABI. That happens when we create output for `:info`.
Unfortunately that nondeterminism is nonlocal and it's hard to tell what it
affects without following a chain of functions. It's also easy to accidentally
make that nondeterminism affect the ABI. Furthermore the envs should be
relatively small, so it should be free to use deterministic maps here.
Testing with nofib and validate detected no difference between UniqFM and
UniqDFM. See also Note [Deterministic UniqFM]
-}
type InstEnv = UniqDFM ClsInstEnv      -- Maps Class to instances for that class
                                       -- See Note [InstEnv determinism]

-- | 'InstEnvs' represents the combination of the global type class instance
-- environment, the local type class instance environment, and the set of
-- transitively reachable orphan modules (according to what modules have been
-- directly imported) used to test orphan instance visibility.
data InstEnvs = InstEnvs {
        ie_global  :: InstEnv,               -- External-package instances
        ie_local   :: InstEnv,               -- Home-package instances
        ie_visible :: VisibleOrphanModules   -- Set of all orphan modules transitively
                                             -- reachable from the module being compiled
                                             -- See Note [Instance lookup and orphan instances]
    }

-- | Set of visible orphan modules, according to what modules have been directly
-- imported.  This is based off of the dep_orphs field, which records
-- transitively reachable orphan modules (modules that define orphan instances).
type VisibleOrphanModules = ModuleSet

newtype ClsInstEnv
  = ClsIE [ClsInst]    -- The instances for a particular class, in any order

instance Outputable ClsInstEnv where
  ppr (ClsIE is) = pprInstances is

-- INVARIANTS:
--  * The is_tvs are distinct in each ClsInst
--      of a ClsInstEnv (so we can safely unify them)

-- Thus, the @ClassInstEnv@ for @Eq@ might contain the following entry:
--      [a] ===> dfun_Eq_List :: forall a. Eq a => Eq [a]
-- The "a" in the pattern must be one of the forall'd variables in
-- the dfun type.
-- | An instance environment containing no instances at all.
emptyInstEnv :: InstEnv
emptyInstEnv = emptyUDFM
-- | All instances in the environment, with per-class buckets flattened.
-- See Note [InstEnv determinism]
instEnvElts :: InstEnv -> [ClsInst]
instEnvElts ie = concatMap (\(ClsIE insts) -> insts) (eltsUDFM ie)
-- | Test if an instance is visible, by checking that its origin module
-- is in 'VisibleOrphanModules'.
-- See Note [Instance lookup and orphan instances]
instIsVisible :: VisibleOrphanModules -> ClsInst -> Bool
instIsVisible vis_mods ispec
  -- NB: Instances from the interactive package always are visible. We can't
  -- add interactive modules to the set since we keep creating new ones
  -- as a GHCi session progresses.
  -- Guard order matters: the interactive check must precede the orphan
  -- check; non-orphans fall through to the final True.
  | isInteractiveModule mod     = True
  | IsOrphan <- is_orphan ispec = mod `elemModuleSet` vis_mods
  | otherwise                   = True
  where
    mod = nameModule $ is_dfun_name ispec
-- | All visible instances of the given class; home-package instances
-- come before external-package ones.
classInstances :: InstEnvs -> Class -> [ClsInst]
classInstances (InstEnvs { ie_global = pkg_ie, ie_local = home_ie, ie_visible = vis_mods }) cls
  = lookIn home_ie ++ lookIn pkg_ie
  where
    lookIn env
      | Just (ClsIE insts) <- lookupUDFM env cls = filter (instIsVisible vis_mods) insts
      | otherwise                                = []
-- | Checks for an exact match of ClsInst in the instance environment.
-- We use this when we do signature checking in TcRnDriver
memberInstEnv :: InstEnv -> ClsInst -> Bool
memberInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm }) =
    case lookupUDFM inst_env cls_nm of
      Nothing            -> False
      Just (ClsIE items) -> any (identicalClsInstHead ins_item) items
extendInstEnvList :: InstEnv -> [ClsInst] -> InstEnv
-- Add a list of instances to the environment.  The accumulated
-- environment is forced (to WHNF) at each step; the previous 'foldl'
-- was lazy in its accumulator and could build a chain of thunks
-- proportional to the number of instances added.
extendInstEnvList inst_env ispecs = go inst_env ispecs
  where
    go env []          = env
    go env (is : rest) = let env' = extendInstEnv env is
                         in env' `seq` go env' rest
extendInstEnv :: InstEnv -> ClsInst -> InstEnv
-- Prepend the new instance to its class's bucket, creating the bucket
-- if the class had no instances yet
extendInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm })
  = addToUDFM_C add inst_env cls_nm (ClsIE [ins_item])
  where
    add (ClsIE cur_insts) _ = ClsIE (ins_item : cur_insts)
deleteFromInstEnv :: InstEnv -> ClsInst -> InstEnv
-- Remove every instance whose head is identical (up to alpha-renaming)
-- to the given one; a no-op if the class has no bucket
deleteFromInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm })
  = adjustUDFM adjust inst_env cls_nm
  where
    adjust (ClsIE items) = ClsIE (filterOut (identicalClsInstHead ins_item) items)
identicalClsInstHead :: ClsInst -> ClsInst -> Bool
-- ^ True when the instance heads are the same
-- e.g.  both are   Eq [(a,b)]
-- Used for overriding in GHCi
-- Obviously should be insensitive to alpha-renaming
identicalClsInstHead (ClsInst { is_cls_nm = cls_nm1, is_tcs = rough1, is_tys = tys1 })
                     (ClsInst { is_cls_nm = cls_nm2, is_tcs = rough2, is_tys = tys2 })
  =  cls_nm1 == cls_nm2
  && not (instanceCantMatch rough1 rough2)  -- Fast check for no match, uses the "rough match" fields
  && isJust (tcMatchTys tys1 tys2)          -- Matching both ways ensures the heads
  && isJust (tcMatchTys tys2 tys1)          -- are identical, not merely one a
                                            -- substitution instance of the other
{-
************************************************************************
* *
Looking up an instance
* *
************************************************************************
@lookupInstEnv@ looks up in a @InstEnv@, using a one-way match. Since
the env is kept ordered, the first match must be the only one. The
thing we are looking up can have an arbitrary "flexi" part.
Note [Instance lookup and orphan instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we are compiling a module M, and we have a zillion packages
loaded, and we are looking up an instance for C (T W). If we find a
match in module 'X' from package 'p', should be "in scope"; that is,
is p:X in the transitive closure of modules imported from M?
The difficulty is that the "zillion packages" might include ones loaded
through earlier invocations of the GHC API, or earlier module loads in GHCi.
They might not be in the dependencies of M itself; and if not, the instances
in them should not be visible. Trac #2182, #8427.
There are two cases:
* If the instance is *not an orphan*, then module X defines C, T, or W.
And in order for those types to be involved in typechecking M, it
must be that X is in the transitive closure of M's imports. So we
can use the instance.
* If the instance *is an orphan*, the above reasoning does not apply.
So we keep track of the set of orphan modules transitively below M;
this is the ie_visible field of InstEnvs, of type VisibleOrphanModules.
If module p:X is in this set, then we can use the instance, otherwise
we can't.
Note [Rules for instance lookup]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These functions implement the carefully-written rules in the user
manual section on "overlapping instances". At risk of duplication,
here are the rules. If the rules change, change this text and the
user manual simultaneously. The link may be this:
http://www.haskell.org/ghc/docs/latest/html/users_guide/type-class-extensions.html#instance-overlap
The willingness to be overlapped or incoherent is a property of the
instance declaration itself, controlled as follows:
* An instance is "incoherent"
if it has an INCOHERENT pragma, or
if it appears in a module compiled with -XIncoherentInstances.
* An instance is "overlappable"
if it has an OVERLAPPABLE or OVERLAPS pragma, or
if it appears in a module compiled with -XOverlappingInstances, or
if the instance is incoherent.
* An instance is "overlapping"
if it has an OVERLAPPING or OVERLAPS pragma, or
if it appears in a module compiled with -XOverlappingInstances, or
     if the instance is incoherent.
Now suppose that, in some client module, we are searching for an instance
of the target constraint (C ty1 .. tyn). The search works like this.
* Find all instances I that match the target constraint; that is, the
target constraint is a substitution instance of I. These instance
declarations are the candidates.
* Find all non-candidate instances that unify with the target
constraint. Such non-candidates instances might match when the
target constraint is further instantiated. If all of them are
incoherent, proceed; if not, the search fails.
* Eliminate any candidate IX for which both of the following hold:
* There is another candidate IY that is strictly more specific;
that is, IY is a substitution instance of IX but not vice versa.
* Either IX is overlappable or IY is overlapping.
* If only one candidate remains, pick it. Otherwise if all remaining
candidates are incoherent, pick an arbitrary candidate. Otherwise fail.
Note [Overlapping instances] (NB: these notes are quite old)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Overlap is permitted, but only in such a way that one can make
a unique choice when looking up. That is, overlap is only permitted if
one template matches the other, or vice versa. So this is ok:
[a] [Int]
but this is not
(Int,a) (b,Int)
If overlap is permitted, the list is kept most specific first, so that
the first lookup is the right choice.
For now we just use association lists.
\subsection{Avoiding a problem with overlapping}
Consider this little program:
\begin{pseudocode}
class C a where c :: a
class C a => D a where d :: a
instance C Int where c = 17
instance D Int where d = 13
instance C a => C [a] where c = [c]
instance ({- C [a], -} D a) => D [a] where d = c
instance C [Int] where c = [37]
main = print (d :: [Int])
\end{pseudocode}
What do you think `main' prints (assuming we have overlapping instances, and
all that turned on)? Well, the instance for `D' at type `[a]' is defined to
be `c' at the same type, and we've got an instance of `C' at `[Int]', so the
answer is `[37]', right? (the generic `C [a]' instance shouldn't apply because
the `C [Int]' instance is more specific).
Ghc-4.04 gives `[37]', while ghc-4.06 gives `[17]', so 4.06 is wrong. That
was easy ;-) Let's just consult hugs for good measure. Wait - if I use old
hugs (pre-September99), I get `[17]', and stranger yet, if I use hugs98, it
doesn't even compile! What's going on!?
What hugs complains about is the `D [a]' instance decl.
\begin{pseudocode}
ERROR "mj.hs" (line 10): Cannot build superclass instance
*** Instance : D [a]
*** Context supplied : D a
*** Required superclass : C [a]
\end{pseudocode}
You might wonder what hugs is complaining about. It's saying that you
need to add `C [a]' to the context of the `D [a]' instance (as appears
in comments). But there's that `C [a]' instance decl one line above
that says that I can reduce the need for a `C [a]' instance to the
need for a `C a' instance, and in this case, I already have the
necessary `C a' instance (since we have `D a' explicitly in the
context, and `C' is a superclass of `D').
Unfortunately, the above reasoning indicates a premature commitment to the
generic `C [a]' instance. I.e., it prematurely rules out the more specific
instance `C [Int]'. This is the mistake that ghc-4.06 makes. The fix is to
add the context that hugs suggests (uncomment the `C [a]'), effectively
deferring the decision about which instance to use.
Now, interestingly enough, 4.04 has this same bug, but it's covered up
in this case by a little known `optimization' that was disabled in
4.06. Ghc-4.04 silently inserts any missing superclass context into
an instance declaration. In this case, it silently inserts the `C
[a]', and everything happens to work out.
(See `basicTypes/MkId:mkDictFunId' for the code in question. Search for
`Mark Jones', although Mark claims no credit for the `optimization' in
question, and would rather it stopped being called the `Mark Jones
optimization' ;-)
So, what's the fix? I think hugs has it right. Here's why. Let's try
something else out with ghc-4.04. Let's add the following line:
d' :: D a => [a]
d' = c
Everyone raise their hand who thinks that `d :: [Int]' should give a
different answer from `d' :: [Int]'. Well, in ghc-4.04, it does. The
`optimization' only applies to instance decls, not to regular
bindings, giving inconsistent behavior.
Old hugs had this same bug. Here's how we fixed it: like GHC, the
list of instances for a given class is ordered, so that more specific
instances come before more generic ones. For example, the instance
list for C might contain:
..., C Int, ..., C a, ...
When we go to look for a `C Int' instance we'll get that one first.
But what if we go looking for a `C b' (`b' is unconstrained)? We'll
pass the `C Int' instance, and keep going. But if `b' is
unconstrained, then we don't know yet if the more specific instance
will eventually apply. GHC keeps going, and matches on the generic `C
a'. The fix is to, at each step, check to see if there's a reverse
match, and if so, abort the search. This prevents hugs from
prematurely choosing a generic instance when a more specific one
exists.
--Jeff
v
BUT NOTE [Nov 2001]: we must actually *unify* not reverse-match in
this test. Suppose the instance envt had
..., forall a b. C a a b, ..., forall a b c. C a b c, ...
(still most specific first)
Now suppose we are looking for (C x y Int), where x and y are unconstrained.
C x y Int doesn't match the template {a,b} C a a b
but neither does
C a a b match the template {x,y} C x y Int
But still x and y might subsequently be unified so they *do* match.
Simple story: unify, don't match.
-}
-- | How to instantiate one type variable of a matching instance's dfun.
type DFunInstType = Maybe Type
        -- Just ty   => Instantiate with this type
        -- Nothing   => Instantiate with any type of this tyvar's kind
        -- See Note [DFunInstType: instantiating types]

-- | A successfully matching instance, paired with the instantiation of
-- each of its dfun's type variables.
type InstMatch = (ClsInst, [DFunInstType])

-- | Full result of an instance lookup: matches, mere unifiers, and
-- matches that Safe Haskell considers unsafely overlapped.
type ClsInstLookupResult
     = ( [InstMatch]     -- Successful matches
       , [ClsInst]       -- These don't match but do unify
       , [InstMatch] )   -- Unsafe overlapped instances under Safe Haskell
                         -- (see Note [Safe Haskell Overlapping Instances] in
                         -- TcSimplify).
{-
Note [DFunInstType: instantiating types]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A successful match is a ClsInst, together with the types at which
the dfun_id in the ClsInst should be instantiated
The instantiating types are (Either TyVar Type)s because the dfun
might have some tyvars that *only* appear in arguments
dfun :: forall a b. C a b, Ord b => D [a]
When we match this against D [ty], we return the instantiating types
[Just ty, Nothing]
where the 'Nothing' indicates that 'b' can be freely instantiated.
(The caller instantiates it to a flexi type variable, which will
presumably later become fixed via functional dependencies.)
-}
-- |Look up an instance in the given instance environment. The given class application must match exactly
-- one instance and the match may not contain any flexi type variables. If the lookup is unsuccessful,
-- yield 'Left errorMessage'.
lookupUniqueInstEnv :: InstEnvs
                    -> Class -> [Type]
                    -> Either MsgDoc (ClsInst, [Type])
lookupUniqueInstEnv instEnv cls tys
  -- Safe Haskell overlap checking is off (False): we only care whether
  -- there is exactly one match here.
  = case lookupInstEnv False instEnv cls tys of
      ([(inst, inst_tys)], _, _)
        | noFlexiVar -> Right (inst, inst_tys')
        | otherwise  -> Left $ text "flexible type variable:" <+>
                               (ppr $ mkTyConApp (classTyCon cls) tys)
        where
          -- Keep only the fixed instantiations; safe because noFlexiVar
          -- guarantees every entry is a Just.
          inst_tys'  = [ty | Just ty <- inst_tys]
          noFlexiVar = all isJust inst_tys
      _other -> Left $ text "instance not found" <+>
                       (ppr $ mkTyConApp (classTyCon cls) tys)
-- | Look up a class application in a single instance environment,
-- keeping only instances visible through the given orphan-module set.
lookupInstEnv' :: InstEnv              -- InstEnv to look in
               -> VisibleOrphanModules -- But filter against this
               -> Class -> [Type]      -- What we are looking for
               -> ([InstMatch],        -- Successful matches
                   [ClsInst])          -- These don't match but do unify
-- The second component of the result pair happens when we look up
--      Foo [a]
-- in an InstEnv that has entries for
--      Foo [Int]
--      Foo [b]
-- Then which we choose would depend on the way in which 'a'
-- is instantiated.  So we report that Foo [b] is a match (mapping b->a)
-- but Foo [Int] is a unifier.  This gives the caller a better chance of
-- giving a suitable error message
lookupInstEnv' ie vis_mods cls tys
  = lookup ie
  where
    rough_tcs = roughMatchTcs tys
    all_tvs   = all isNothing rough_tcs

    --------------
    lookup env = case lookupUDFM env cls of
                   Nothing            -> ([],[]) -- No instances for this class
                   Just (ClsIE insts) -> find [] [] insts

    --------------
    -- Walk the instance list, accumulating matches (ms) and unifiers (us).
    find ms us [] = (ms, us)
    find ms us (item@(ClsInst { is_tcs = mb_tcs, is_tvs = tpl_tvs
                              , is_tys = tpl_tys }) : rest)
      | not (instIsVisible vis_mods item)
      = find ms us rest -- See Note [Instance lookup and orphan instances]

      -- Fast check for no match, uses the "rough match" fields
      | instanceCantMatch rough_tcs mb_tcs
      = find ms us rest

      -- One-way match: the template instantiates to the wanted types.
      | Just subst <- tcMatchTys tpl_tys tys
      = find ((item, map (lookupTyVar subst) tpl_tvs) : ms) us rest

      -- Does not match, so next check whether the things unify
      -- See Note [Overlapping instances] and Note [Incoherent instances]
      | isIncoherent item
      = find ms us rest

      | otherwise
      = ASSERT2( tyCoVarsOfTypes tys `disjointVarSet` tpl_tv_set,
                 (ppr cls <+> ppr tys <+> ppr all_tvs) $$
                 (ppr tpl_tvs <+> ppr tpl_tys)
               )
        -- Unification will break badly if the variables overlap
        -- They shouldn't because we allocate separate uniques for them
        -- See Note [Template tyvars are fresh]
        case tcUnifyTys instanceBindFun tpl_tys tys of
          Just _  -> find ms (item:us) rest
          Nothing -> find ms us rest
      where
        tpl_tv_set = mkVarSet tpl_tvs
---------------
-- This is the common way to call this function.
-- | Look up a class application in both the home and external package
-- instance environments, prune overlapped matches, and report any
-- matches that Safe Haskell considers unsafely overlapped.
lookupInstEnv :: Bool              -- Check Safe Haskell overlap restrictions
              -> InstEnvs          -- External and home package inst-env
              -> Class -> [Type]   -- What we are looking for
              -> ClsInstLookupResult
-- ^ See Note [Rules for instance lookup]
-- ^ See Note [Safe Haskell Overlapping Instances] in TcSimplify
-- ^ See Note [Safe Haskell Overlapping Instances Implementation] in TcSimplify
lookupInstEnv check_overlap_safe
              (InstEnvs { ie_global  = pkg_ie
                        , ie_local   = home_ie
                        , ie_visible = vis_mods })
              cls
              tys
  = -- pprTrace "lookupInstEnv" (ppr cls <+> ppr tys $$ ppr home_ie) $
    (final_matches, final_unifs, unsafe_overlapped)
  where
    (home_matches, home_unifs) = lookupInstEnv' home_ie vis_mods cls tys
    (pkg_matches,  pkg_unifs)  = lookupInstEnv' pkg_ie  vis_mods cls tys
    all_matches = home_matches ++ pkg_matches
    all_unifs   = home_unifs   ++ pkg_unifs
    final_matches = foldr insert_overlapping [] all_matches
        -- Even if the unifs is non-empty (an error situation)
        -- we still prune the matches, so that the error message isn't
        -- misleading (complaining of multiple matches when some should be
        -- overlapped away)

    -- Safe Haskell check only applies when exactly one match survived.
    unsafe_overlapped
      = case final_matches of
          [match] -> check_safe match
          _       -> []

    -- If the selected match is incoherent, discard all unifiers
    final_unifs = case final_matches of
                    (m:_) | isIncoherent (fst m) -> []
                    _                            -> all_unifs

    -- NOTE [Safe Haskell isSafeOverlap]
    -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    -- We restrict code compiled in 'Safe' mode from overriding code
    -- compiled in any other mode. The rationale is that code compiled
    -- in 'Safe' mode is code that is untrusted by the ghc user. So
    -- we shouldn't let that code change the behaviour of code the
    -- user didn't compile in 'Safe' mode since that's the code they
    -- trust. So 'Safe' instances can only overlap instances from the
    -- same module. A same instance origin policy for safe compiled
    -- instances.
    check_safe (inst,_)
      = case check_overlap_safe && unsafeTopInstance inst of
          -- make sure it only overlaps instances from the same module
          True  -> go [] all_matches
          -- most specific is from a trusted location.
          False -> []
      where
        -- Collect every other match that is neither from the same module
        -- nor explicitly overlappable; those are the unsafe ones.
        go bad [] = bad
        go bad (i@(x,_):unchecked) =
          if inSameMod x || isOverlappable x
            then go bad unchecked
            else go (i:bad) unchecked

        inSameMod b =
          let na = getName $ getName inst
              la = isInternalName na
              nb = getName $ getName b
              lb = isInternalName nb
          in (la && lb) || (nameModule na == nameModule nb)

    -- We consider the most specific instance unsafe when it both:
    --   (1) Comes from a module compiled as `Safe`
    --   (2) Is an orphan instance, OR, an instance for a MPTC
    unsafeTopInstance inst = isSafeOverlap (is_flag inst) &&
        (isOrphan (is_orphan inst) || classArity (is_cls inst) > 1)
---------------
insert_overlapping :: InstMatch -> [InstMatch] -> [InstMatch]
-- ^ Add a new solution, knocking out strictly less specific ones
-- See Note [Rules for instance lookup]
-- Invariant: the accumulated list never contains a pair where one member
-- strictly overrides (and is allowed to override) the other.
insert_overlapping new_item [] = [new_item]
insert_overlapping new_item@(new_inst,_) (old_item@(old_inst,_) : old_items)
  | new_beats_old               -- New strictly overrides old
  , not old_beats_new
  , new_inst `can_override` old_inst
  = insert_overlapping new_item old_items

  | old_beats_new               -- Old strictly overrides new
  , not new_beats_old
  , old_inst `can_override` new_inst
  = old_item : old_items

  -- Discard incoherent instances; see Note [Incoherent instances]
  | isIncoherent old_inst       -- Old is incoherent; discard it
  = insert_overlapping new_item old_items
  | isIncoherent new_inst       -- New is incoherent; discard it
  = old_item : old_items

  -- Equal or incomparable, and neither is incoherent; keep both
  | otherwise
  = old_item : insert_overlapping new_item old_items
  where
    new_beats_old = new_inst `more_specific_than` old_inst
    old_beats_new = old_inst `more_specific_than` new_inst

    -- `instB` can be instantiated to match `instA`
    -- or the two are equal
    instA `more_specific_than` instB
      = isJust (tcMatchTys (is_tys instB) (is_tys instA))

    instA `can_override` instB
      = isOverlapping instA || isOverlappable instB
      -- Overlap permitted if either the more specific instance
      -- is marked as overlapping, or the more general one is
      -- marked as overlappable.
      -- Latest change described in: Trac #9242.
      -- Previous change: Trac #3877, Dec 10.
{-
Note [Incoherent instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
For some classes, the choice of a particular instance does not matter, any one
is good. E.g. consider
class D a b where { opD :: a -> b -> String }
instance D Int b where ...
instance D a Int where ...
g (x::Int) = opD x x -- Wanted: D Int Int
For such classes this should work (without having to add an "instance D Int
Int", and using -XOverlappingInstances, which would then work). This is what
-XIncoherentInstances is for: Telling GHC "I don't care which instance you use;
if you can use one, use it."
Should this logic only work when *all* candidates have the incoherent flag, or
even when all but one have it? The right choice is the latter, which can be
justified by comparing the behaviour with how -XIncoherentInstances worked when
it was only about the unify-check (note [Overlapping instances]):
Example:
class C a b c where foo :: (a,b,c)
instance C [a] b Int
instance [incoherent] C [Int] b c
instance [incoherent] C a Int c
Thanks to the incoherent flags,
[Wanted] C [a] b Int
works: Only instance one matches, the others just unify, but are marked
incoherent.
So I can write
(foo :: ([a],b,Int)) :: ([Int], Int, Int).
but if that works then I really want to be able to write
foo :: ([Int], Int, Int)
as well. Now all three instances from above match. None is more specific than
another, so none is ruled out by the normal overlapping rules. One of them is
not incoherent, but we still want this to compile. Hence the
"all-but-one-logic".
The implementation is in insert_overlapping, where we remove matching
incoherent instances as long as there are others.
************************************************************************
* *
Binding decisions
* *
************************************************************************
-}
-- | Binding policy used when unifying a wanted constraint against an
-- instance template: overlappable skolem type variables stay rigid,
-- every other variable may be bound by the unifier.
instanceBindFun :: TyCoVar -> BindFlag
instanceBindFun tv
  | isTcTyVar tv, isOverlappableTyVar tv = Skolem
  | otherwise                            = BindMe
-- Note [Binding when looking up instances]
{-
Note [Binding when looking up instances]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When looking up in the instance environment, or family-instance environment,
we are careful about multiple matches, as described above in
Note [Overlapping instances]
The key_tys can contain skolem constants, and we can guarantee that those
are never going to be instantiated to anything, so we should not involve
them in the unification test. Example:
class Foo a where { op :: a -> Int }
instance Foo a => Foo [a] -- NB overlap
instance Foo [Int] -- NB overlap
data T = forall a. Foo a => MkT a
f :: T -> Int
f (MkT x) = op [x,x]
The op [x,x] means we need (Foo [a]). Without the filterVarSet we'd
complain, saying that the choice of instance depended on the instantiation
of 'a'; but of course it isn't *going* to be instantiated.
We do this only for isOverlappableTyVar skolems. For example we reject
g :: forall a => [a] -> Int
g x = op x
on the grounds that the correct instance depends on the instantiation of 'a'
-}
| sgillespie/ghc | compiler/types/InstEnv.hs | bsd-3-clause | 40,624 | 0 | 15 | 10,370 | 3,918 | 2,183 | 1,735 | -1 | -1 |
-- | Naive doubly-recursive Fibonacci: @fibonacci 0 == 0@, @fibonacci 1 == 1@,
-- and each later value is the sum of the previous two.  Arguments below 1
-- are returned unchanged, exactly as in the guard-based original.
-- Exponential time; fine for small inputs only.
fibonacci :: (Num a, Ord a) => a -> a
fibonacci n = if n <= 1 then n else fibonacci (n - 1) + fibonacci (n - 2)
| sushantmahajan/programs | haskell/first.hs | cc0-1.0 | 72 | 0 | 9 | 21 | 56 | 26 | 30 | 2 | 1 |
-------------------------------------------------------------------------------
-- |
-- Module : System.Hardware.Haskino.Test.ExprWord32
-- Copyright : (c) University of Kansas
-- License : BSD3
-- Stability : experimental
--
-- Quick Check tests for Expressions returning a Expr Word32
-------------------------------------------------------------------------------
{-# LANGUAGE GADTs #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE NegativeLiterals #-}
module System.Hardware.Haskino.Test.ExprInt32 where
import Prelude hiding
( quotRem, divMod, quot, rem, div, mod, properFraction, fromInteger, toInteger, (<*) )
import qualified Prelude as P
import System.Hardware.Haskino
import Data.Boolean
import Data.Boolean.Numbers
import Data.Boolean.Bits
import Data.Int
import Data.Word
import qualified Data.Bits as DB
import Test.QuickCheck hiding ((.&.))
import Test.QuickCheck.Monadic
-- | Extract the payload of a literal 'Expr Int32'.  Deliberately partial:
-- the properties in this file only ever read back fully evaluated
-- literals from the board, so any other constructor is a protocol failure
-- and a pattern-match error is the desired loud signal.
litEval32 :: Expr Int32 -> Int32
litEval32 (LitI32 w) = w

-- | Extract the payload of a literal 'Expr Bool' (partial, like 'litEval32').
litEvalB :: Expr Bool -> Bool
litEvalB (LitB w) = w
-- | Remote 'negate' agrees with local 'negate' for every 'Int32'.
-- This is the shape shared by every property in this file: compute the
-- value locally, evaluate the same expression on the board through a
-- remote reference, then compare the literal that comes back.
prop_neg :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Property
prop_neg c r x = monadicIO $ do
    let local = negate x
    remote <- run $ send c $ do
        writeRemoteRefE r $ negate (lit x)
        v <- readRemoteRefE r
        return v
    assert (local == litEval32 remote)
prop_sign :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Property
prop_sign c r x = monadicIO $ do
let local = signum x
remote <- run $ send c $ do
writeRemoteRefE r $ signum (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
-- | Remote addition agrees with local addition for every pair of 'Int32's.
prop_add :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Property
prop_add c r x y = monadicIO $ do
    let expected = x + y
    actual <- run $ send c $ do
        writeRemoteRefE r (lit x + lit y)
        readRemoteRefE r
    assert (expected == litEval32 actual)
prop_sub :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Property
prop_sub c r x y = monadicIO $ do
let local = x - y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) - (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_mult :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Property
prop_mult c r x y = monadicIO $ do
let local = x * y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) * (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_div :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonZero Int32 -> Property
prop_div c r x (NonZero y) = monadicIO $ do
let local = x `P.div` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `div` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_rem :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonZero Int32 -> Property
prop_rem c r x (NonZero y) = monadicIO $ do
let local = x `P.rem` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `rem` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_quot :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonZero Int32 -> Property
prop_quot c r x (NonZero y) = monadicIO $ do
let local = x `P.quot` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `quot` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_mod :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonZero Int32 -> Property
prop_mod c r x (NonZero y) = monadicIO $ do
let local = x `P.mod` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `mod` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_comp :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Property
prop_comp c r x = monadicIO $ do
let local = DB.complement x
remote <- run $ send c $ do
writeRemoteRefE r $ complement (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_and :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Property
prop_and c r x y = monadicIO $ do
let local = x DB..&. y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) .&. (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_or :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Property
prop_or c r x y = monadicIO $ do
let local = x DB..|. y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) .|. (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_xor :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Property
prop_xor c r x y = monadicIO $ do
let local = x `DB.xor` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `xor` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_shiftL :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonNegative Int -> Property
prop_shiftL c r x (NonNegative y) = monadicIO $ do
let local = x `DB.shiftL` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `shiftL` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_shiftR :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonNegative Int -> Property
prop_shiftR c r x (NonNegative y) = monadicIO $ do
let local = x `DB.shiftR` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `shiftR` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_setBit :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonNegative Int -> Property
prop_setBit c r x (NonNegative y) = monadicIO $ do
let local = x `DB.setBit` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `setBit` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_clearBit :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> NonNegative Int -> Property
prop_clearBit c r x (NonNegative y) = monadicIO $ do
let local = x `DB.clearBit` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `clearBit` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_testBit :: ArduinoConnection -> RemoteRef Bool -> Int32 -> NonNegative Int -> Property
prop_testBit c r x (NonNegative y) = monadicIO $ do
let local = x `DB.testBit` y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) `testBit` (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_from8 :: ArduinoConnection -> RemoteRef Int32 -> Word8 -> Property
prop_from8 c r x = monadicIO $ do
let local = fromIntegral x
remote <- run $ send c $ do
writeRemoteRefE r $ fromIntegralB (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_from16 :: ArduinoConnection -> RemoteRef Int32 -> Word16 -> Property
prop_from16 c r x = monadicIO $ do
let local = fromIntegral x
remote <- run $ send c $ do
writeRemoteRefE r $ fromIntegralB (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_from32 :: ArduinoConnection -> RemoteRef Int32 -> Word32 -> Property
prop_from32 c r x = monadicIO $ do
let local = fromIntegral x
remote <- run $ send c $ do
writeRemoteRefE r $ fromIntegralB (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_fromI8 :: ArduinoConnection -> RemoteRef Int32 -> Int8 -> Property
prop_fromI8 c r x = monadicIO $ do
let local = fromIntegral x
remote <- run $ send c $ do
writeRemoteRefE r $ fromIntegralB (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_fromI16 :: ArduinoConnection -> RemoteRef Int32 -> Int16 -> Property
prop_fromI16 c r x = monadicIO $ do
let local = fromIntegral x
remote <- run $ send c $ do
writeRemoteRefE r $ fromIntegralB (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_fromFTrunc :: ArduinoConnection -> RemoteRef Int32 -> Float -> Property
prop_fromFTrunc c r x = monadicIO $ do
let local = P.truncate x
remote <- run $ send c $ do
writeRemoteRefE r $ Data.Boolean.Numbers.truncate (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_fromFRound :: ArduinoConnection -> RemoteRef Int32 -> Float -> Property
prop_fromFRound c r x = monadicIO $ do
let local = P.round x
remote <- run $ send c $ do
writeRemoteRefE r $ Data.Boolean.Numbers.round (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_fromFCeil :: ArduinoConnection -> RemoteRef Int32 -> Float -> Property
prop_fromFCeil c r x = monadicIO $ do
let local = P.ceiling x
remote <- run $ send c $ do
writeRemoteRefE r $ Data.Boolean.Numbers.ceiling (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_fromFFloor :: ArduinoConnection -> RemoteRef Int32 -> Float -> Property
prop_fromFFloor c r x = monadicIO $ do
let local = P.floor x
remote <- run $ send c $ do
writeRemoteRefE r $ Data.Boolean.Numbers.floor (lit x)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_ifb :: ArduinoConnection -> RemoteRef Int32 -> Bool -> Int32 -> Int32 -> Property
prop_ifb c r b x y = monadicIO $ do
let local = if b then x + y else x - y
remote <- run $ send c $ do
writeRemoteRefE r $ ifB (lit b) (lit x + lit y) (lit x - lit y)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
prop_eq :: ArduinoConnection -> RemoteRef Bool -> Int32 -> Int32 -> Property
prop_eq c r x y = monadicIO $ do
let local = x == y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) ==* (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_neq :: ArduinoConnection -> RemoteRef Bool -> Int32 -> Int32 -> Property
prop_neq c r x y = monadicIO $ do
let local = x /= y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) /=* (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_lt :: ArduinoConnection -> RemoteRef Bool -> Int32 -> Int32 -> Property
prop_lt c r x y = monadicIO $ do
let local = x < y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) <* (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_gt :: ArduinoConnection -> RemoteRef Bool -> Int32 -> Int32 -> Property
prop_gt c r x y = monadicIO $ do
let local = x > y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) >* (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_lte :: ArduinoConnection -> RemoteRef Bool -> Int32 -> Int32 -> Property
prop_lte c r x y = monadicIO $ do
let local = x <= y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) <=* (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_gte :: ArduinoConnection -> RemoteRef Bool -> Int32 -> Int32 -> Property
prop_gte c r x y = monadicIO $ do
let local = x >= y
remote <- run $ send c $ do
writeRemoteRefE r $ (lit x) >=* (lit y)
v <- readRemoteRefE r
return v
assert (local == litEvalB remote)
prop_arith :: ArduinoConnection -> RemoteRef Int32 ->
Int32 -> Int32 -> Int32 -> Int32 -> Int32 -> NonZero Int32 -> Property
prop_arith c r a b d e f (NonZero g) = monadicIO $ do
let local = a * b + d * e - f `P.div` g
remote <- run $ send c $ do
writeRemoteRefE r $ (lit a) * (lit b) + (lit d) * (lit e) - (lit f) `div` (lit g)
v <- readRemoteRefE r
return v
assert (local == litEval32 remote)
-- | Exercises monadic bind in the remote monad: a * b + d * e is computed
-- through a chain of writes and read-backs of a single remote reference.
-- Fix: dropped a stray @v <- readRemoteRefE r@ that sat between two
-- writes; its result was never used and its binding was shadowed by the
-- later @v@ (an extra pointless remote round-trip plus a
-- -Wname-shadowing warning).
prop_bind :: ArduinoConnection -> RemoteRef Int32 -> Int32 -> Int32 -> Int32 -> Int32 -> Property
prop_bind c r a b d e = monadicIO $ do
    let local = a * b + d * e
    remote <- run $ send c $ do
        writeRemoteRefE r $ (lit a)
        a' <- readRemoteRefE r
        writeRemoteRefE r $ a' * (lit b)
        ab' <- readRemoteRefE r
        writeRemoteRefE r $ (lit d)
        d' <- readRemoteRefE r
        writeRemoteRefE r $ d' * (lit e)
        de' <- readRemoteRefE r
        writeRemoteRefE r $ ab' + de'
        readRemoteRefE r
    assert (local == litEval32 remote)
-- | Remote while-loop counts up from -128 until it reaches the bound @x'@.
-- Widening the bound from 'Int8' guarantees -128 <= x' <= 127, so the
-- loop always terminates at exactly the bound (or never runs, when the
-- bound is -128 itself).
prop_while :: ArduinoConnection -> Int8 -> Property
prop_while c x = monadicIO $ do
    let x'::Int32 = fromIntegral x
    let local = x'
    remote <- run $ send c $ do
        v <- whileE (lit (-128::Int32)) (\z -> z <* lit x') (\z -> return $ z + 1)
        return v
    assert (local == litEval32 remote)
-- | Open the Arduino connection, allocate the two remote references the
-- properties share, run every QuickCheck suite in turn, then close.
-- Fix: corrected typos in two printed headers ("Multiplcation" ->
-- "Multiplication", "Arithemtic" -> "Arithmetic").
-- NOTE(review): the serial device path is hard-coded; confirm it matches
-- the target machine before running.
main :: IO ()
main = do
    conn <- openArduino False "/dev/cu.usbmodem1421"
    refI32 <- send conn $ newRemoteRefE 0
    refB <- send conn $ newRemoteRefE (lit False)
    print "Negation Tests:"
    quickCheck (prop_neg conn refI32)
    print "Signum Tests:"
    quickCheck (prop_sign conn refI32)
    print "Addition Tests:"
    quickCheck (prop_add conn refI32)
    print "Subtraction Tests:"
    quickCheck (prop_sub conn refI32)
    print "Multiplication Tests:"
    quickCheck (prop_mult conn refI32)
    print "Division Tests:"
    quickCheck (prop_div conn refI32)
    print "Remainder Tests:"
    quickCheck (prop_rem conn refI32)
    print "Quotient Tests:"
    quickCheck (prop_quot conn refI32)
    print "Modulo Tests:"
    quickCheck (prop_mod conn refI32)
    print "Complement Tests:"
    quickCheck (prop_comp conn refI32)
    print "Bitwise And Tests:"
    quickCheck (prop_and conn refI32)
    print "Bitwise Or Tests:"
    quickCheck (prop_or conn refI32)
    print "Bitwise Xor Tests:"
    quickCheck (prop_xor conn refI32)
    print "Shift Left Tests:"
    quickCheck (prop_shiftL conn refI32)
    print "Shift Right Tests:"
    quickCheck (prop_shiftR conn refI32)
    print "Set Bit Tests:"
    quickCheck (prop_setBit conn refI32)
    print "Clear Bit Tests:"
    quickCheck (prop_clearBit conn refI32)
    print "Test Bit Tests:"
    quickCheck (prop_testBit conn refB)
    print "From Word8 Tests:"
    quickCheck (prop_from8 conn refI32)
    print "From Word16 Tests:"
    quickCheck (prop_from16 conn refI32)
    print "From Word32 Tests:"
    quickCheck (prop_from32 conn refI32)
    print "From Int8 Tests:"
    quickCheck (prop_fromI8 conn refI32)
    print "From Int16 Tests:"
    quickCheck (prop_fromI16 conn refI32)
    print "From Float Truncate Tests:"
    quickCheck (prop_fromFTrunc conn refI32)
    print "From Float Round Tests:"
    quickCheck (prop_fromFRound conn refI32)
    print "From Float Ceiling Tests:"
    quickCheck (prop_fromFCeil conn refI32)
    print "From Float Floor Tests:"
    quickCheck (prop_fromFFloor conn refI32)
    print "ifB Tests:"
    quickCheck (prop_ifb conn refI32)
    print "Equal Tests:"
    quickCheck (prop_eq conn refB)
    print "Not Equal Tests:"
    quickCheck (prop_neq conn refB)
    print "Less Than Tests:"
    quickCheck (prop_lt conn refB)
    print "Greater Than Tests:"
    quickCheck (prop_gt conn refB)
    print "Less Than Equal Tests:"
    quickCheck (prop_lte conn refB)
    print "Greater Than Equal Tests:"
    quickCheck (prop_gte conn refB)
    print "Arithmetic Tests:"
    quickCheck (prop_arith conn refI32)
    print "Bind Tests:"
    quickCheck (prop_bind conn refI32)
    print "While Tests:"
    quickCheck (prop_while conn)
    closeArduino conn
| ku-fpg/kansas-amber | tests/ExprTests/ExprInt32.hs | bsd-3-clause | 15,947 | 0 | 19 | 4,277 | 6,125 | 2,810 | 3,315 | 406 | 2 |
module Distribution.Nixpkgs.Haskell.Constraint
( Constraint, satisfiesConstraint, satisfiesConstraints
) where
import Distribution.Package
import Distribution.Version
import Distribution.Nixpkgs.Haskell.OrphanInstances ( )
type Constraint = Dependency
-- | A package satisfies a constraint when the constraint names a
-- different package, or when the package's version lies within the
-- constrained range.
satisfiesConstraint :: PackageIdentifier -> Constraint -> Bool
satisfiesConstraint (PackageIdentifier pn v) (Dependency cn vr)
  | pn == cn  = v `withinRange` vr
  | otherwise = True

-- | A package satisfies a constraint set when it satisfies every member.
satisfiesConstraints :: PackageIdentifier -> [Constraint] -> Bool
satisfiesConstraints pkg = all (satisfiesConstraint pkg)
| Fuuzetsu/cabal2nix | src/Distribution/Nixpkgs/Haskell/Constraint.hs | bsd-3-clause | 543 | 0 | 7 | 63 | 137 | 79 | 58 | 10 | 1 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="de-DE">
<title>All In One Notes Add-On</title>
<maps>
<homeID>top</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset> | thc202/zap-extensions | addOns/allinonenotes/src/main/javahelp/org/zaproxy/zap/extension/allinonenotes/resources/help_de_DE/helpset_de_DE.hs | apache-2.0 | 968 | 77 | 67 | 159 | 417 | 211 | 206 | -1 | -1 |
<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<helpset version="2.0" xml:lang="hu-HU">
<title>Directory List v2.3 LC</title>
<maps>
<homeID>directorylistv2_3_lc</homeID>
<mapref location="map.jhm"/>
</maps>
<view>
<name>TOC</name>
<label>Contents</label>
<type>org.zaproxy.zap.extension.help.ZapTocView</type>
<data>toc.xml</data>
</view>
<view>
<name>Index</name>
<label>Index</label>
<type>javax.help.IndexView</type>
<data>index.xml</data>
</view>
<view>
<name>Search</name>
<label>Search</label>
<type>javax.help.SearchView</type>
<data engine="com.sun.java.help.search.DefaultSearchEngine">
JavaHelpSearch
</data>
</view>
<view>
<name>Favorites</name>
<label>Favorites</label>
<type>javax.help.FavoritesView</type>
</view>
</helpset> | kingthorin/zap-extensions | addOns/directorylistv2_3_lc/src/main/javahelp/help_hu_HU/helpset_hu_HU.hs | apache-2.0 | 984 | 78 | 66 | 158 | 414 | 210 | 204 | -1 | -1 |
{- Refactoring: move the definiton 'fringe' to module C1. This example aims
to test the moving of the definition and the modification of export/import -}
module D6(fringe, sumSquares) where
import C6
-- | Collect the leaf values of a 'Tree' in left-to-right order.
-- ('Tree' and its constructors come from the open import of C6 —
-- presumably; confirm against that module.)
fringe :: Tree a -> [a]
fringe (Leaf x) = [x]
fringe (Branch left right) = fringe left ++ fringe right
-- | Sum of the squares of the elements of a list (0 for the empty list).
sumSquares :: Num a => [a] -> a
sumSquares xs = sum [ sq x | x <- xs ]

-- | Square a number by raising it to 'pow'.
sq :: Num a => a -> a
sq x = x ^ pow

-- | Exponent used by 'sq' (monomorphic under the monomorphism
-- restriction, matching the original binding).
pow :: Integer
pow = 2
| SAdams601/HaRe | old/testing/moveDefBtwMods/D6.hs | bsd-3-clause | 439 | 0 | 7 | 125 | 126 | 66 | 60 | 9 | 1 |
-- Copyright (C) 2015 Michael Alan Dorman <[email protected]>
-- This file is not part of GNU Emacs.
-- This program is free software; you can redistribute it and/or modify it under
-- the terms of the GNU General Public License as published by the Free Software
-- Foundation, either version 3 of the License, or (at your option) any later
-- version.
-- This program is distributed in the hope that it will be useful, but WITHOUT
-- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-- FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
-- details.
-- You should have received a copy of the GNU General Public License along with
-- this program. If not, see <http://www.gnu.org/licenses/>.
import Data.Version (Version (Version))
import Distribution.Simple.Utils (cabalVersion)
import System.Environment (getArgs)
-- | The two tools this helper can emit CPP defines for.
data Mode
  = GHC
  | HLint

-- | Render a CPP define for the given mode: GHC takes @-D\<def\>@,
-- hlint takes @--cpp-define=\<def\>@.
define :: Mode -> String -> String
define mode def = prefix mode ++ def
  where
    prefix GHC   = "-D"
    prefix HLint = "--cpp-define="
-- | Extra CPP defines needed for old Cabal versions; currently just
-- USE_COMPILER_ID, rendered in the given mode's syntax.
legacyFlags :: Mode -> [String]
legacyFlags mode = [define mode "USE_COMPILER_ID"]
-- | True when the Cabal library this program was built against
-- predates 1.22.
isLegacyCabal :: Bool
isLegacyCabal = cabalVersion < Version [1, 22] []
-- | Pick the output mode from the command-line arguments: a leading
-- "hlint" selects 'HLint'; anything else (including no arguments)
-- selects 'GHC'.
getMode :: [String] -> Mode
getMode args = case args of
  ("hlint" : _) -> HLint
  _             -> GHC
-- | Print one flag per line for the mode named by the command-line
-- arguments; nothing is printed unless we were built against a
-- pre-1.22 Cabal.
main :: IO ()
main = do
    mode <- fmap getMode getArgs
    let flags = if isLegacyCabal then legacyFlags mode else []
    mapM_ putStrLn flags
| robin-liu-1983/emacs-robin.d | emacs.d/elpa/flycheck-haskell-20160118.851/get-flags.hs | mit | 1,434 | 0 | 11 | 287 | 254 | 142 | 112 | 24 | 2 |
{-# LANGUAGE TemplateHaskell #-}
{-# OPTIONS_GHC -dth-dec-file #-}
module T8624 (THDec(..)) where
import Language.Haskell.TH
-- Top-level Template Haskell splice generating the declaration
--   data THDec = THDec
-- (-dth-dec-file above makes GHC dump the generated declaration to a
-- .th.hs file, which is what this test exercises.)
$(return [DataD [] (mkName "THDec") [] [NormalC (mkName "THDec") []] []])
| urbanslug/ghc | testsuite/tests/th/T8624.hs | bsd-3-clause | 201 | 0 | 13 | 27 | 73 | 40 | 33 | 5 | 0 |
{-# LANGUAGE Arrows, ViewPatterns #-}
module T3964 where
import Control.Arrow
-- | Collapse two layers of 'Maybe', yielding @Just v@ exactly when the
-- doubly wrapped value equals @v@, and @Nothing@ otherwise.  Written in
-- arrow @proc@ notation; the view pattern @((==v) -> True)@ matches
-- precisely when the inner value equals @v@.
testF :: Eq a => a -> (Maybe (Maybe a)) -> Maybe a
testF v = proc x -> case x of
    Just (Just ((==v) -> True)) -> returnA -< Just v
    _ -> returnA -< Nothing
| forked-upstream-packages-for-ghcjs/ghc | testsuite/tests/arrows/should_compile/T3964.hs | bsd-3-clause | 271 | 1 | 14 | 86 | 107 | 55 | 52 | 7 | 2 |
{-# LANGUAGE TypeFamilies #-}
module OverC
where
import OverA (C, D)
-- Instances for the families C (data family) and D (type family), both
-- declared in OverA and imported above.
data instance C [a] [Int] = C9ListList
type instance D [a] [Int] = Char
| siddhanathan/ghc | testsuite/tests/indexed-types/should_fail/OverC.hs | bsd-3-clause | 144 | 0 | 6 | 28 | 54 | 33 | 21 | 5 | 0 |
module Http.Tests.Request (tests) where
------------------------------------------------------------------------------
import qualified Http.Request as Request
import Test.Framework
import Test.Framework.Providers.HUnit
import Test.HUnit hiding (Test)
------------------------------------------------------------------------------
-- | All request tests exported from this module.
-- Fix: the list previously referenced 'testTheTests', which is not
-- defined anywhere in this module (the only test below is
-- 'testAddHeader'), so the module failed to compile.
tests :: [Test]
tests = [ testAddHeader ]
------------------------------------------------------------------------------
-- | Placeholder test case.
-- NOTE(review): asserts that the expected value (1 + 1) equals the
-- actual value 3, which always fails.  This looks like deliberate
-- scaffolding to prove the harness runs, but the name suggests a real
-- add-header test was intended — confirm before shipping.
testAddHeader :: Test
testAddHeader = testCase "request/test" $ assertEqual "2 is" (1 + 1) 3
| ndreynolds/hsURL | test/Http/Tests/Request.hs | mit | 617 | 0 | 8 | 116 | 93 | 58 | 35 | 9 | 1 |
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE FlexibleContexts #-}
module Text.Greek.IO.Process where
import Prelude hiding (words)
import Control.Monad.Except
import Data.Map (Map)
import Text.Greek.IO.Stage
import Text.Greek.Source.FileReference
import qualified Control.Lens as Lens
import qualified Data.Foldable as Foldable
import qualified Data.List as List
import qualified Data.Map as Map
import qualified Data.Maybe as Maybe
import qualified Data.Text as Text
import qualified Data.Tuple as Tuple
import qualified Text.Greek.IO.Json as Json
import qualified Text.Greek.IO.Morphgnt as Morphgnt
import qualified Text.Greek.IO.Type as Type
import qualified Text.Greek.IO.Utility as Utility
import qualified Text.Greek.Source.All as All
import qualified Text.Greek.Source.Work as Work
import qualified Text.Greek.Phonology.Consonant as Consonant
import qualified Text.Greek.Script.Abstract as Abstract
import qualified Text.Greek.Script.Concrete as Concrete
import qualified Text.Greek.Script.Elision as Elision
import qualified Text.Greek.Script.Mark as Mark
import qualified Text.Greek.Script.Marked as Marked
import qualified Text.Greek.Script.Place as Place
import qualified Text.Greek.Script.Punctuation as Punctuation
import qualified Text.Greek.Script.Syllable as Syllable
import qualified Text.Greek.Script.Word as Word
import qualified Text.Greek.Script.Unicode as Unicode
import qualified Text.Greek.Utility as Utility
-- | Top-level pipeline: load the SBLGNT source words, push them through
-- every analysis stage (stage0 .. stage11), then serialize the collected
-- type data, the works, and the overall index as JSON.  Any stage that
-- returns 'Nothing' aborts with the stage name as the error message.
processSblgnt :: ExceptT String IO ()
processSblgnt = do
  sourceWords <- All.loadSblgnt
  _ <- liftIO $ putStrLn "Processing"
  -- Each stage yields (viewable type data, transformed word surface);
  -- the surface of one stage feeds the next.
  let (stage0, composedWords) = makeStage0 sourceWords
  let (stage1, decomposedWords) = makeStage1 composedWords
  (stage2, unicodeLetterMarks) <- Utility.handleError $ tryMakeStage2 decomposedWords
  (stage3, concreteLetterConcreteMarks) <- Utility.handleMaybe "stage3" $ tryMakeStage3 unicodeLetterMarks
  (stage4, abstractLetterConcreteMarks) <- Utility.handleMaybe "stage4" $ tryMakeStage4 concreteLetterConcreteMarks
  (stage5, abstractLetterMarkGroup) <- Utility.handleMaybe "stage5" $ tryMakeStage5 abstractLetterConcreteMarks
  let (stage6, vowelConsonantMarkGroup) = makeStage6 abstractLetterMarkGroup
  (stage7, vocalicSyllableABConsonantRh) <- Utility.handleMaybe "stage7" $ makeStage7 vowelConsonantMarkGroup
  (stage8, syllableRhAB) <- Utility.handleMaybe "stage8" $ makeStage8 vocalicSyllableABConsonantRh
  (stage9, syllableRBA) <- Utility.handleMaybe "stage9" $ makeStage9 syllableRhAB
  (stage10, syllableRBA') <- Utility.handleMaybe "stage10" $ makeStage10 syllableRBA
  let (stage11, _) = Morphgnt.makeStage syllableRBA'
  let
    stages =
      [ stage0
      , stage1
      , stage2
      , stage3
      , stage4
      , stage5
      , stage6
      , stage7
      , stage8
      , stage9
      , stage10
      , stage11
      ]
  -- Assign globally unique type indexes across all stages, then persist
  -- every type's value table.
  let indexedStages = indexStages stages
  let indexedTypeDatas = getIndexedStageTypeDatas indexedStages
  let storedTypeDatas = fmap snd indexedTypeDatas
  let storedTypes = fmap typeDataJson storedTypeDatas
  liftIO $ putStrLn "Writing types"
  liftIO $ Json.writeTypes storedTypes
  -- Map type names back to their assigned indexes so specific types can
  -- be referenced from the index document.
  let typeNameMap = Map.fromList . fmap (\(i,t) -> (typeDataName t, i)) $ indexedTypeDatas
  specialTypes <- Utility.handleMaybe "special types" $ Json.SpecialTypes
    <$> Map.lookup Type.SourceWord typeNameMap
    <*> Map.lookup Type.WordPrefix typeNameMap
    <*> Map.lookup Type.WordSuffix typeNameMap
  workInfoTypeIndexes <- Utility.handleMaybe "workInfoTypeIndexes" $
    lookupAll typeNameMap
    [ Type.SourceWord
    , Type.Verse
    , Type.WorkSource
    ]
  summaryTypeIndexes <- Utility.handleMaybe "summaryTypeIndexes" $
    lookupAll typeNameMap
    [ Type.MorphgntLemma
    , Type.MorphgntPartOfSpeech
    , Type.MorphgntParsingCode
    , Type.ListScriptSyllableConsonantRB
    , (Type.Count Type.Syllable)
    , Type.WordAccent
    , Type.InitialEnclitic
    , Type.Crasis
    , Type.Elision
    , Type.Verse
    , Type.ParagraphNumber
    ]
  let instanceMap = Json.makeInstanceMap storedTypes
  let ourWorks = getWorks summaryTypeIndexes instanceMap sourceWords
  liftIO $ putStrLn "Writing works"
  liftIO $ Json.writeWorks ourWorks
  -- Finally emit the index tying works, types, and stages together.
  let ourWorkInfos = fmap (Json.workToWorkInfo workInfoTypeIndexes) ourWorks
  let ourTypeInfos = fmap Json.makeTypeInfo storedTypes
  let ourStageInfos = fmap getStageInfo indexedStages
  let ourIndex = Json.Index ourWorkInfos ourTypeInfos specialTypes ourStageInfos
  liftIO $ putStrLn "Writing index"
  liftIO $ Json.writeIndex ourIndex
getStageInfo :: Stage (Json.TypeIndex, a) -> Json.StageInfo
getStageInfo (Stage p ps) = Json.StageInfo (fst p) (fmap fst ps)
-- | Stage 0: convert each word's source text into composed Unicode code
-- points, and record the word-level source properties (word text, file,
-- location, paragraph, verse, prefix/suffix) and work-level properties
-- (source, title) as viewable types.
makeStage0 :: [Work.Indexed [Word.Word Word.Basic Word.SourceInfo]]
  -> ( Stage TypeData
     , [Work.Indexed [Word.Word Word.Basic [Unicode.Composed]]]
     )
makeStage0 sourceWords = (stage, composedWords)
  where
    composedWords = toComposedWords sourceWords
    stage = Stage primaryType typeParts
    primaryType = makeSurfaceType Json.WordStageTypeKind Type.UnicodeComposed composedWords
    typeParts =
      [ makeWordPartType Json.WordPropertyTypeKind Type.SourceWord (pure . Word.getSourceInfoWord . Word.getSurface) sourceWords
      , makeWordPartType Json.WordPropertyTypeKind Type.SourceFile (pure . _fileReferencePath . Word.getSourceInfoFile . Word.getSurface) sourceWords
      , makeWordPartType Json.WordPropertyTypeKind Type.SourceFileLocation (pure . (\(FileReference _ l1 l2) -> (l1, l2)) . Word.getSourceInfoFile . Word.getSurface) sourceWords
      , makeWordPartType Json.WordPropertyTypeKind Type.ParagraphNumber (Lens.toListOf (Word.info . Word.paragraphIndexLens)) sourceWords
      , makeWordPartType Json.WordPropertyTypeKind Type.Verse (Lens.toListOf (Word.info . Word.verseLens)) sourceWords
      , makeWordPartType Json.WordPropertyTypeKind Type.WordPrefix (Lens.toListOf (Word.info . Word.prefixLens)) sourceWords
      , makeWordPartType Json.WordPropertyTypeKind Type.WordSuffix (Lens.toListOf (Word.info . Word.suffixLens)) sourceWords
      , makeWorkInfoType Json.WorkPropertyTypeKind Type.WorkSource (Lens.view Lens._2) sourceWords
      , makeWorkInfoType Json.WorkPropertyTypeKind Type.WorkTitle (Lens.view Lens._3) sourceWords
      ]
-- | Stage 1: canonically decompose each composed code point.  The stage
-- also records the composed->decomposed mapping as a function type.
makeStage1 :: [Work.Indexed [Word.Word Word.Basic [Unicode.Composed]]]
  -> ( Stage TypeData
     , [Work.Indexed [Word.Word Word.Basic [Unicode.Decomposed]]]
     )
makeStage1 composedWords = (stage, decomposedWords)
  where
    decomposedWordPairs = toDecomposedWordPairs composedWords
    decomposedWords = toDecomposedWords decomposedWordPairs
    stage = Stage primaryType typeParts
    primaryType = makeSurfaceType Json.WordStageTypeKind Type.UnicodeDecomposed decomposedWords
    typeParts =
      [ makeSurfaceType Json.WordStageFunctionTypeKind (Type.Function Type.UnicodeComposed (Type.List Type.UnicodeDecomposed)) decomposedWordPairs
      ]
-- | Stage 2: split off elision, then group decomposed code points into
-- letters carrying their combining marks.  Fails with a 'Unicode.Error'
-- when a code point is not a recognized letter or mark.
tryMakeStage2 :: WordSurface Word.Basic [Unicode.Decomposed]
  -> Either Unicode.Error (Stage TypeData, WordSurface Word.Elision [Marked.Unit Unicode.Letter [Unicode.Mark]])
tryMakeStage2 decomposedWords = (,) <$> mStage <*> mUnicodeLetterMarks
  where
    decomposedWordsE = splitDecomposedElision decomposedWords
    mUnicodeLetterMarksPairs = toUnicodeLetterMarksPairs decomposedWordsE
    mUnicodeLetterMarks = toUnicodeLetterMarks <$> mUnicodeLetterMarksPairs
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind Type.UnicodeLetterMarks <$> mUnicodeLetterMarks
    mTypeParts = sequence
      [ makeSurfaceType Json.WordStageFunctionTypeKind (Type.Function (Type.List Type.UnicodeDecomposed) Type.UnicodeLetterMarks) <$> mUnicodeLetterMarksPairs
      , makeSurfacePartType Json.WordStagePartTypeKind Type.UnicodeLetter (pure . Marked._item) <$> mUnicodeLetterMarks
      , makeSurfacePartType Json.WordStagePartTypeKind Type.UnicodeMark Marked._marks <$> mUnicodeLetterMarks
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.AbstractLetter) (pure . Word.LetterCount . length . Word.getSurface) <$> mUnicodeLetterMarks
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.ConcreteMark) (pure . Word.MarkCount . sum . fmap (length . Marked._marks) . Word.getSurface) <$> mUnicodeLetterMarks
      , pure $ makeWordPartType Json.WordPropertyTypeKind Type.Elision (pure . Lens.view (Word.info . Word.elisionLens . Lens._1)) decomposedWordsE
      , pure $ makeWordPartType Json.WordPropertyTypeKind Type.UnicodeElision (Lens.toListOf (Word.info . Word.elisionLens . Lens._2 . Lens._Just)) decomposedWordsE
      ]
-- | Stage 3: map Unicode letters and marks to their script-level
-- ("concrete") equivalents.  'Nothing' if any letter or mark has no
-- concrete counterpart.
tryMakeStage3 :: WordSurface (Word.IndexedP a) [Marked.Unit Unicode.Letter [Unicode.Mark]]
  -> Maybe (Stage TypeData, WordSurface (Word.IndexedP a) [Marked.Unit Concrete.Letter [Concrete.Mark]])
tryMakeStage3 unicodeLetterMarks = (,) <$> mStage <*> mConcreteLetterConcreteMarks
  where
    -- Pair every letter/mark with its concrete form so the mapping itself
    -- can be surfaced as a function type.
    mMarkedUnicodeConcretePairsLM = toMarkedConcreteLetters unicodeLetterMarks >>= toMarkedConcreteMarks
    mMarkedUnicodeConcretePairsB = toMarkedUnicodeConcretePairs <$> mMarkedUnicodeConcretePairsLM
    mConcreteLetterConcreteMarks = Lens.over (Lens._Just . wordSurfaceLens . traverse) snd mMarkedUnicodeConcretePairsB
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind Type.ConcreteLetterMarks <$> mConcreteLetterConcreteMarks
    mTypeParts = sequence
      [ makeSurfaceType Json.WordStageFunctionTypeKind (Type.Function Type.UnicodeLetterMarks Type.ConcreteLetterMarks) <$> mMarkedUnicodeConcretePairsB
      , makeSurfacePartType Json.WordStagePartFunctionTypeKind (Type.Function Type.UnicodeLetter Type.ConcreteLetter) (pure . Marked._item) <$> mMarkedUnicodeConcretePairsLM
      , makeSurfacePartType Json.WordStagePartFunctionTypeKind (Type.Function Type.UnicodeMark Type.ConcreteMark) Marked._marks <$> mMarkedUnicodeConcretePairsLM
      , makeSurfacePartType Json.WordStagePartTypeKind Type.ConcreteLetter (pure . Marked._item) <$> mConcreteLetterConcreteMarks
      , makeSurfacePartType Json.WordStagePartTypeKind Type.ConcreteMark Marked._marks <$> mConcreteLetterConcreteMarks
      ]
-- | Stage 4: abstract away case and final form from each concrete letter,
-- hoisting capitalization to the word level and validating that final
-- forms only occur where permitted.
tryMakeStage4 :: WordSurface Word.Elision [Marked.Unit Concrete.Letter [Concrete.Mark]]
  -> Maybe (Stage TypeData, WordSurface Word.Capital [Marked.Unit Abstract.Letter [Concrete.Mark]])
tryMakeStage4 concreteLetterConcreteMarks = (,) <$> mStage <*> mCapMarkedAbstractLetters
  where
    -- Each concrete letter maps to (abstract letter, case, final form).
    markedAbstractLetterPairs = Lens.over (wordSurfaceLens . traverse . Marked.item) (\x -> (x, Abstract.toLetterCaseFinal x)) concreteLetterConcreteMarks
    markedAbstractLettersCF = Lens.over (wordSurfaceLens . traverse . Marked.item) snd markedAbstractLetterPairs
    mCapMarkedAbstractLetters = toCapitalWord markedAbstractLettersCF >>= validateFinalForm
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind Type.AbstractLetterMarks <$> mCapMarkedAbstractLetters
    mTypeParts = sequence
      [ pure $ makeSurfacePartType Json.WordStagePartFunctionTypeKind (Type.Function Type.ConcreteLetter Type.AbstractLetterCaseFinal) (pure . Marked._item) markedAbstractLetterPairs
      , pure $ makeSurfaceType Json.WordStageTypeKind Type.AbstractLetterCaseFinalMarks markedAbstractLettersCF
      , pure $ makeSurfacePartType Json.WordStagePartTypeKind Type.AbstractLetter (pure . Lens.view (Marked.item . Lens._1)) markedAbstractLettersCF
      , pure $ makeSurfacePartType Json.WordStagePartTypeKind Type.LetterCase (pure . Lens.view (Marked.item . Lens._2)) markedAbstractLettersCF
      , pure $ makeSurfacePartType Json.WordStagePartTypeKind Type.LetterFinalForm (pure . Lens.view (Marked.item . Lens._3)) markedAbstractLettersCF
      , pure $ makeIndexedSurfacePartType Json.CompositePropertyTypeKind Type.LetterCase Abstract.CaseIndex (Lens.view (Marked.item . Lens._2)) markedAbstractLettersCF
      , pure $ makeReverseIndexedSurfacePartType Json.CompositePropertyTypeKind Type.LetterFinalForm Abstract.FinalReverseIndex (Lens.view (Marked.item . Lens._3)) markedAbstractLettersCF
      , pure $ makeIndexedSurfacePartType Json.CompositePropertyTypeKind Type.AbstractLetter Abstract.LetterIndex (Lens.view (Marked.item . Lens._1)) markedAbstractLettersCF
      , pure $ makeReverseIndexedSurfacePartType Json.CompositePropertyTypeKind Type.AbstractLetter Abstract.LetterReverseIndex (Lens.view (Marked.item . Lens._1)) markedAbstractLettersCF
      , makeWordPartType Json.WordPropertyTypeKind Type.WordCapitalization (pure . Lens.view (Word.info . Word.capitalLens)) <$> mCapMarkedAbstractLetters
      ]
-- | Stage 5: classify each concrete mark by kind and collapse the marks
-- on a letter into a 'Mark.Group' (accent, breathing, syllabic slots).
-- 'Nothing' when a letter carries an invalid combination of marks.
tryMakeStage5 :: WordSurface (Word.IndexedP a) [Marked.Unit Abstract.Letter [Concrete.Mark]]
  -> Maybe (Stage TypeData, WordSurface (Word.IndexedP a) [Marked.Unit Abstract.Letter (Mark.Group Maybe)])
tryMakeStage5 capMarkedAbstractLetters = (,) <$> mStage <*> mMarkedAbstractLetterMarkGroups
  where
    markedAbstractLetterMarkKindPairs = toMarkedAbstractLetterMarkKindPairs capMarkedAbstractLetters
    markedAbstractLetterMarkKinds = Lens.over (wordSurfaceLens . traverse . Marked.marks . traverse) snd markedAbstractLetterMarkKindPairs
    mMarkedAbstractLetterMarkGroupPairs = dupApply (wordSurfaceLens . traverse . Marked.marks) Mark.toMarkGroup markedAbstractLetterMarkKinds
    mMarkedAbstractLetterMarkGroups = Lens.over (Lens._Just . wordSurfaceLens . traverse . Marked.marks) snd mMarkedAbstractLetterMarkGroupPairs
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind (Type.AbstractLetterMarkGroup) <$> mMarkedAbstractLetterMarkGroups
    mTypeParts = sequence
      [ pure $ makeSurfacePartType Json.WordStageFunctionTypeKind (Type.Function Type.ConcreteMark Type.MarkKind) Marked._marks markedAbstractLetterMarkKindPairs
      , pure $ makeSurfaceType Json.WordStageTypeKind (Type.AbstractLetterMarkKinds) markedAbstractLetterMarkKinds
      , makeSurfacePartType Json.WordStageFunctionTypeKind (Type.Function (Type.List Type.MarkKind) (Type.MarkGroup)) (pure . Marked._marks) <$> mMarkedAbstractLetterMarkGroupPairs
      -- Per-word counts of each mark-group slot (accent/breathing/syllabic).
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.Accent) (pure . Mark.AccentCount . sum . fmap (maybeToOneOrZero . Lens.view (Marked.marks . Lens._1)) . Word.getSurface) <$> mMarkedAbstractLetterMarkGroups
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.Breathing) (pure . Mark.BreathingCount . sum . fmap (maybeToOneOrZero . Lens.view (Marked.marks . Lens._2)) . Word.getSurface) <$> mMarkedAbstractLetterMarkGroups
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.SyllabicMark) (pure . Mark.SyllabicCount . sum . fmap (maybeToOneOrZero . Lens.view (Marked.marks . Lens._3)) . Word.getSurface) <$> mMarkedAbstractLetterMarkGroups
      ]
-- | Stage 6: split abstract letters into vowels and consonants (total, so
-- no failure case), recording per-word vowel/consonant counts and the
-- pairing of syllabic marks with their carrier letter.
makeStage6 :: WordSurface (Word.IndexedP a) [Marked.Unit Abstract.Letter (Mark.Group Maybe)]
  -> (Stage TypeData, WordSurface (Word.IndexedP a) [Marked.Unit Abstract.VowelConsonant (Mark.Group Maybe)])
makeStage6 abstractLetterMarkGroup = (stage, vowelConsonantMarkGroup)
  where
    vowelConsonantMarkGroupPairs = dupApply' (wordSurfaceLens . traverse . Marked.item) Abstract.toVowelConsonant abstractLetterMarkGroup
    vowelConsonantMarkGroup = Lens.over (wordSurfaceLens . traverse . Marked.item) snd vowelConsonantMarkGroupPairs
    stage = Stage primaryType typeParts
    primaryType = makeSurfaceType Json.WordStageTypeKind Type.VowelConsonantMarkGroup vowelConsonantMarkGroup
    typeParts =
      [ makeSurfacePartType Json.WordStageFunctionTypeKind (Type.Function Type.AbstractLetter Type.VowelConsonant) (pure . Marked._item) vowelConsonantMarkGroupPairs
      , makeSurfacePartType Json.WordStagePartTypeKind Type.VowelConsonant (pure . Marked._item) vowelConsonantMarkGroup
      , makeSurfacePartType Json.WordStagePartTypeKind Type.Vowel (Lens.toListOf (Marked.item . Lens._Left)) vowelConsonantMarkGroup
      , makeSurfacePartType Json.WordStagePartTypeKind Type.Consonant (Lens.toListOf (Marked.item . Lens._Right)) vowelConsonantMarkGroup
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.Vowel) (pure . Word.VowelCount . sum . fmap (length . (Lens.toListOf (Marked.item . Lens._Left))) . Word.getSurface) vowelConsonantMarkGroup
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.Consonant) (pure . Word.ConsonantCount . sum . fmap (length . (Lens.toListOf (Marked.item . Lens._Right))) . Word.getSurface) vowelConsonantMarkGroup
      , makeSurfacePartType Json.CompositePropertyTypeKind Type.SyllabicMarkVowelConsonant getSyllabicMarkVowelConsonant vowelConsonantMarkGroup
      ]
-- | Stage 7: form vocalic syllables (singles, diphthongs, improper
-- diphthongs) carrying accent+breathing, and reify breathing marks on
-- consonants into rough-rho consonants.  'Nothing' on an invalid
-- vowel/consonant + mark combination.
makeStage7 :: WordSurface (Word.IndexedP a) [Marked.Unit Abstract.VowelConsonant (Mark.Group Maybe)]
  -> Maybe (Stage TypeData, WordSurface (Word.IndexedP a) [Syllable.VocalicEither (Mark.AccentBreathing Maybe) Consonant.PlusRoughRho])
makeStage7 vowelConsonantMarkGroup = (,) <$> mStage <*> mVocalicSyllableABConsonantRh
  where
    startSyllable = Lens.over wordSurfaceLens (Syllable.makeStartVocalic . fmap (\(Marked.Unit a b) -> (a, b))) vowelConsonantMarkGroup
    mVocalicSyllableABConsonantBPair = dupApply (wordSurfaceLens . traverse) Syllable.validateVocalicConsonant startSyllable
    mVocalicSyllableABConsonantB = Lens.over (Lens._Just . wordSurfaceLens . traverse) snd mVocalicSyllableABConsonantBPair
    mVocalicSyllableABConsonantRhPair = mVocalicSyllableABConsonantB >>= dupApply (wordSurfaceLens . traverse . Lens._Right) Consonant.reifyBreathing
    mVocalicSyllableABConsonantRh = Lens.over (Lens._Just . wordSurfaceLens . traverse . Lens._Right) snd mVocalicSyllableABConsonantRhPair
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind Type.VocalicSyllableABConsonantRh <$> mVocalicSyllableABConsonantRh
    mTypeParts = sequence
      [ pure $ makeSurfaceType Json.WordStageTypeKind Type.StartSyllable startSyllable
      , makeSurfaceType Json.WordStageFunctionTypeKind (Type.Function Type.StartSyllable Type.VocalicSyllableABConsonantB) <$> mVocalicSyllableABConsonantBPair
      , makeSurfaceType Json.WordStageTypeKind Type.VocalicSyllableABConsonantB <$> mVocalicSyllableABConsonantB
      , makeSurfacePartType Json.WordStagePartTypeKind Type.VocalicSyllable (Lens.toListOf Lens._Left) <$> Lens.over (Lens._Just . wordSurfaceLens . traverse . Lens._Left) (fmap (const ())) mVocalicSyllableABConsonantB
      , makeSurfacePartType Json.WordStagePartTypeKind Type.VocalicSyllableSingle (concat . Lens.toListOf Lens._Left . Lens.over Lens._Left Syllable.vocalicToSingle) <$> mVocalicSyllableABConsonantB
      , makeSurfacePartType Json.WordStagePartTypeKind Type.ImproperDiphthong (concat . Lens.toListOf Lens._Left . Lens.over Lens._Left Syllable.vocalicToImproperDiphthong) <$> mVocalicSyllableABConsonantB
      , makeSurfacePartType Json.WordStagePartTypeKind Type.Diphthong (concat . Lens.toListOf Lens._Left . Lens.over Lens._Left Syllable.vocalicToDiphthong) <$> mVocalicSyllableABConsonantB
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.Syllable) (pure . sum . fmap Syllable.getSyllableCount . Word.getSurface) <$> mVocalicSyllableABConsonantB
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.VocalicSyllableSingle) (pure . sum . fmap Syllable.getVocalicSingleCount . Word.getSurface) <$> mVocalicSyllableABConsonantB
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.ImproperDiphthong) (pure . sum . fmap Syllable.getImproperDiphthongCount . Word.getSurface) <$> mVocalicSyllableABConsonantB
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.Diphthong) (pure . sum . fmap Syllable.getDiphthongCount . Word.getSurface) <$> mVocalicSyllableABConsonantB
      , makeSurfacePartType Json.WordStageFunctionTypeKind (Type.Function Type.ConsonantBreathing Type.ConsonantRh) (Lens.toListOf Lens._Right) <$> mVocalicSyllableABConsonantRhPair
      , makeSurfacePartType Json.WordStagePartTypeKind Type.ConsonantRh (Lens.toListOf Lens._Right) <$> mVocalicSyllableABConsonantRh
      ]
-- | Stage 8: cluster consonants, tag their positions, and assign consonant
-- clusters to syllables.  Two assignment strategies are built: "right"
-- (cluster attaches to the following syllable) and "approx" (clusters are
-- split using the set of word-initial clusters attested in the corpus);
-- the approx form is the stage's result.
makeStage8 :: forall a. WordSurface (Word.IndexedP a) [Syllable.VocalicEither (Mark.AccentBreathing Maybe) Consonant.PlusRoughRho]
  -> Maybe (Stage TypeData, WordSurface (Word.IndexedP a)
       (Syllable.SyllableListOrConsonants (Mark.AccentBreathing Maybe) [Consonant.PlusRoughRho]))
makeStage8 vocalicSyllableABConsonantRh = (,) <$> mStage <*> mSyllableApproxAB
  where
    vocalicSyllableABConsonantCluster = Lens.over wordSurfaceLens Syllable.clusterConsonants vocalicSyllableABConsonantRh
    vocalicSyllableABConsonantClusterPlace3 = Lens.over wordSurfaceLens Syllable.tagConsonantPositions vocalicSyllableABConsonantCluster
    vocalicSyllableABConsonantClusterPlace3Swap = Lens.over (wordSurfaceLens . traverse . Lens._Right) Tuple.swap vocalicSyllableABConsonantClusterPlace3
    -- Clusters observed at word start; used to decide where a medial
    -- cluster may be split.
    initialConsonantClusterSet = Place.getInitialSet . Lens.toListOf (wordSurfaceLens . traverse . Lens._Right) $ vocalicSyllableABConsonantClusterPlace3
    vocalicSyllableABConsonantClusterPlace4 = Lens.over (wordSurfaceLens . traverse . Lens._Right) (Place.applyAttestation initialConsonantClusterSet) vocalicSyllableABConsonantClusterPlace3
    vocalicSyllableABConsonantClusterMAI = Lens.over (wordSurfaceLens . traverse . Lens._Right . Lens._2) (\(_,b,_,d) -> (b,d)) vocalicSyllableABConsonantClusterPlace4
    mSyllableRightAB :: Maybe (WordSurface (Word.IndexedP a) (Syllable.SyllableListOrConsonants (Mark.AccentBreathing Maybe) [Consonant.PlusRoughRho]))
    mSyllableRightAB = wordSurfaceLens Syllable.makeSyllableMedialNext vocalicSyllableABConsonantCluster
    mSyllableRightABSurface :: Maybe (WordSurface (Word.IndexedP a) [Syllable.SyllableOrConsonants (Mark.AccentBreathing Maybe) [Consonant.PlusRoughRho]])
    mSyllableRightABSurface = unifySurfaceSyllables mSyllableRightAB
    mSyllableRightSurface :: Maybe (WordSurface (Word.IndexedP a) [Syllable.SyllableOrConsonants () [Consonant.PlusRoughRho]])
    mSyllableRightSurface = dropMark mSyllableRightABSurface
    -- Erase the syllable mark parameter so mark-free views can be built.
    dropMark :: Maybe (WordSurface (Word.IndexedP a) [Syllable.SyllableOrConsonants b [Consonant.PlusRoughRho]])
      -> Maybe (WordSurface (Word.IndexedP a) [Syllable.SyllableOrConsonants () [Consonant.PlusRoughRho]])
    dropMark = Lens.over (Lens._Just . wordSurfaceLens . traverse . Lens._Left) (Syllable.mapSyllableMark (const ()))
    approxSplit = Consonant.splitScriptSyllable initialConsonantClusterSet
    mSyllableApproxAB :: Maybe (WordSurface (Word.IndexedP a) (Syllable.SyllableListOrConsonants (Mark.AccentBreathing Maybe) [Consonant.PlusRoughRho]))
    mSyllableApproxAB = mSyllableRightAB >>= (wordSurfaceLens . Lens._Left) (Syllable.splitMedial approxSplit)
    mSyllableApprox :: Maybe (WordSurface (Word.IndexedP a) (Syllable.SyllableListOrConsonants () [Consonant.PlusRoughRho]))
    mSyllableApprox = stripSyllableMark mSyllableApproxAB
    -- NOTE(review): built from mSyllableRightAB, not mSyllableApproxAB --
    -- confirm whether the "approx" surface views are intentionally the
    -- same as the "right" ones here.
    mSyllableApproxABSurface = unifySurfaceSyllables mSyllableRightAB
    mSyllableApproxSurface = dropMark mSyllableApproxABSurface
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStagePartTypeKind Type.ScriptSyllableConsonantRhAB_Approx <$> mSyllableApproxABSurface
    mTypeParts = sequence
      [ makeWordPartType Json.WordPropertyTypeKind Type.ListScriptSyllableConsonantRh (pure . Lens.toListOf (Word.surface . Lens._Left . traverse)) <$> mSyllableApprox
      , makeSurfaceType Json.WordStagePartTypeKind Type.ScriptSyllableConsonantRh_Approx <$> mSyllableApproxSurface
      , makeSurfaceType Json.WordStageTypeKind Type.ScriptSyllableConsonantRhAB_Right <$> mSyllableRightABSurface
      , makeSurfaceType Json.WordStagePartTypeKind Type.ScriptSyllableConsonantRh_Right <$> mSyllableRightSurface
      , pure $ makeSurfaceType Json.WordStageTypeKind Type.VocalicSyllableABConsonantRhCluster vocalicSyllableABConsonantCluster
      , pure $ makeSurfacePartType Json.WordStagePartTypeKind Type.ConsonantRhCluster (Lens.toListOf Lens._Right) vocalicSyllableABConsonantCluster
      , pure $ makeSurfacePartType Json.CompositePropertyTypeKind Type.ConsonantRhClusterPlace3 (Lens.toListOf Lens._Right) vocalicSyllableABConsonantClusterPlace3
      , pure $ makeSurfacePartType Json.CompositePropertyTypeKind Type.ConsonantRhClusterPlace3Swap (Lens.toListOf Lens._Right) vocalicSyllableABConsonantClusterPlace3Swap
      , pure $ makeSurfacePartType Json.CompositePropertyTypeKind Type.ConsonantRhClusterPlaceInfo (fmap (\(a, (b, c)) -> (b, c, Consonant.splitScriptSyllableInfo a)) . Lens.toListOf Lens._Right) vocalicSyllableABConsonantClusterMAI
      ]
-- | Collapse the syllables-or-consonants 'Either' at the word level into a
-- single list of per-item syllable/consonant values.
unifySurfaceSyllables :: Maybe (WordSurface c (Syllable.SyllableListOrConsonants m c1))
  -> Maybe (WordSurface c [Syllable.SyllableOrConsonants m c1])
unifySurfaceSyllables = Lens.over (Lens._Just . wordSurfaceLens) Syllable.unifySyllableConsonant
-- | Replace every syllable's mark with @()@, keeping the structure intact;
-- used to derive mark-free views of a syllabified surface.
stripSyllableMark :: Traversable t0 => Maybe [Work.Indexed [Word.Word c (Either (t0 (Syllable.Syllable b c2)) c1)]]
  -> Maybe [Work.Indexed [Word.Word c (Either (t0 (Syllable.Syllable () c2)) c1)]]
stripSyllableMark = Lens.over (Lens._Just . wordSurfaceLens . Lens._Left . traverse) (Syllable.mapSyllableMark (const ()))
-- | Stage 9: lift crasis from the syllables onto the word info, then
-- process breathing marks, turning rough breathing into a consonant-level
-- property ('Consonant.PlusRoughRhoRoughBreathing').  'Nothing' when
-- breathing processing fails for any word.
makeStage9 :: WordSurface Word.Capital (Syllable.SyllableListOrConsonants (Mark.AccentBreathing Maybe) [Consonant.PlusRoughRho])
  -> Maybe (Stage TypeData, WordSurface Word.WithCrasis (Syllable.SyllableListOrConsonants (Maybe Mark.Accent) [Consonant.PlusRoughRhoRoughBreathing]))
makeStage9 syllableApproxAB = (,) <$> mStage <*> mProcessed
  where
    -- Crasis is detected from the word's surface and recorded in its info.
    wordApplyCrasis :: Word.Word Word.Capital (Syllable.SyllableListOrConsonants (Mark.AccentBreathing Maybe) [c])
      -> Word.Word Word.WithCrasis (Syllable.SyllableListOrConsonants (Mark.AccentBreathing Maybe) [c])
    wordApplyCrasis w = Lens.over Word.info (Word.addCrasis (Syllable.getCrasis . Word.getSurface $ w)) w
    withCrasis = Lens.over (traverse . Work.content . traverse) wordApplyCrasis syllableApproxAB
    mProcessed = wordSurfaceLens Syllable.processBreathing withCrasis
    mProcessedSurfaceNoMarks = unifySurfaceSyllables . stripSyllableMark $ mProcessed
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind Type.ScriptSyllableConsonantRBA_Approx <$> unifySurfaceSyllables mProcessed
    mTypeParts = sequence
      [ makeWordPartType Json.WordPropertyTypeKind Type.ListScriptSyllableConsonantRB (pure . Lens.toListOf (Word.surface . traverse)) <$> mProcessedSurfaceNoMarks
      , makeSurfaceType Json.WordStagePartTypeKind Type.ScriptSyllableConsonantRB_Approx <$> mProcessedSurfaceNoMarks
      , makeWordPartType Json.WordPropertyTypeKind Type.Crasis (Lens.toListOf (Word.info . Word.crasisLens)) <$> mProcessed
      ]
-- | Stage 10: sentence-aware accent processing.  Attaches end-of-sentence
-- info from each word's suffix punctuation, rewrites grave accents (which
-- depend on sentence position) to acute/circumflex, marks initial
-- enclitics, and finally records each word's accent.  'Nothing' when any
-- of those steps fails for any word.
makeStage10 :: WordSurface Word.WithCrasis (Syllable.SyllableListOrConsonants (Maybe Mark.Accent) [Consonant.PlusRoughRhoRoughBreathing])
  -> Maybe (Stage TypeData, WordSurface Word.WithAccent (Syllable.SyllableListOrConsonants (Maybe Mark.AcuteCircumflex) [Consonant.PlusRoughRhoRoughBreathing]))
makeStage10 syllableRBA = (,) <$> mStage <*> mWithAccent
  where
    mWithSentence :: Maybe (WordSurface Word.Sentence (Syllable.SyllableListOrConsonants (Maybe Mark.Accent) [Consonant.PlusRoughRhoRoughBreathing]))
    mWithSentence = (traverse . Work.content . traverse) wordAddSentence syllableRBA
    -- Sentence boundaries are read off each word's suffix punctuation.
    wordAddSentence w = do
      pair <- Punctuation.tryGetSentencePair $ getSuffix w
      return $ Lens.over Word.info (Word.addSentencePair pair) w
    getSuffix w = concatMap Text.unpack . Lens.toListOf (Word.info . Word.suffixLens . Lens._Just . Word.suffix) $ w
    mGraveGonePairs :: Maybe (WordSurface Word.Sentence (Syllable.SyllableListOrConsonants (Maybe (Mark.Accent, Mark.AcuteCircumflex)) [Consonant.PlusRoughRhoRoughBreathing]))
    mGraveGonePairs = mWithSentence >>=
      ((traverse . Work.content . traverse)
      (\w -> dupApply
        (Word.surface . Lens._Left . traverse . Syllable.syllableMarkLens . Lens._Just)
        (Syllable.processGrave (getEndOfSentence w))
        w))
    mGraveGone :: Maybe (WordSurface Word.Sentence (Syllable.SyllableListOrConsonants (Maybe Mark.AcuteCircumflex) [Consonant.PlusRoughRhoRoughBreathing]))
    mGraveGone = Lens.over (Lens._Just . wordSurfaceLens . Lens._Left . traverse . Syllable.syllableMarkLens . Lens._Just) snd mGraveGonePairs
    getEndOfSentence = Lens.view (Word.info . Word.sentenceLens . Lens._1)
    mWithEnclitic :: Maybe (WordSurface Word.WithEnclitic (Syllable.SyllableListOrConsonants (Maybe Mark.AcuteCircumflex) [Consonant.PlusRoughRhoRoughBreathing]))
    mWithEnclitic = Lens.over (Lens._Just . traverse . Work.content) Syllable.markInitialEnclitic mGraveGone
    mWithAccent :: Maybe (WordSurface Word.WithAccent (Syllable.SyllableListOrConsonants (Maybe Mark.AcuteCircumflex) [Consonant.PlusRoughRhoRoughBreathing]))
    mWithAccent = mWithEnclitic >>=
      ( (traverse . Work.content . traverse)
        (\w -> do
          a <- Syllable.getWordAccent (Word.getSurface w)
          return $ Lens.over Word.info (Word.addAccent a) w
        )
      )
    mStage = Stage <$> mPrimaryType <*> mTypeParts
    mPrimaryType = makeSurfaceType Json.WordStageTypeKind Type.ScriptSyllableConsonantRBAC_Approx <$> unifySurfaceSyllables mWithAccent
    mTypeParts = sequence
      [ makeWordPartType Json.WordPropertyTypeKind Type.WordAccent (Lens.toListOf (Word.info . Word.accentLens)) <$> mWithAccent
      , makeWordPartType Json.WordPropertyTypeKind Type.WordUltimaUnaccented
        (Lens.over traverse Word.getUltimaUnaccented . Lens.toListOf (Word.info . Word.accentLens)) <$> mWithAccent
      , makeWordPartType Json.WordPropertyTypeKind (Type.Count Type.AcuteCircumflex)
        (pure . Word.AcuteCircumflexCount . length . Lens.toListOf (Word.surface . Lens._Left . traverse . Syllable.syllableMarkLens . Lens._Just))
        <$> mWithEnclitic
      , makeWordPartType Json.WordPropertyTypeKind Type.EndOfSentence (pure . getEndOfSentence) <$> mWithSentence
      , makeWordPartType Json.WordPropertyTypeKind Type.UnicodeEndOfSentence
        (Lens.toListOf (Word.info . Word.sentenceLens . Lens._2 . Lens._Just)) <$> mWithSentence
      , makeWordPartType Json.CompositePropertyTypeKind Type.EndOfSentenceAccent
        (\w -> fmap (\x -> (getEndOfSentence w, x)) . Lens.toListOf (Word.surface . Lens._Left . traverse . Syllable.syllableMarkLens . Lens._Just) $ w)
        <$> mWithSentence
      , makeWordPartType Json.WordStagePartFunctionTypeKind (Type.Function Type.Accent Type.AcuteCircumflex)
        (Lens.toListOf (Word.surface . Lens._Left . traverse . Syllable.syllableMarkLens . Lens._Just)) <$> mGraveGonePairs
      , makeWordPartType Json.WordStagePartTypeKind Type.AcuteCircumflex
        (Lens.toListOf (Word.surface . Lens._Left . traverse . Syllable.syllableMarkLens . Lens._Just)) <$> mGraveGone
      , makeReverseIndexedSurfacePartType2
          Json.CompositePropertyTypeKind
          Type.AcuteCircumflex
          Syllable.ReverseIndex
          (Lens.toListOf (Lens._Left . traverse . Syllable.syllableMarkLens))
          <$> mGraveGone
      , makeWordPartType Json.WordPropertyTypeKind Type.InitialEnclitic
        (Lens.toListOf (Word.info . Word.encliticLens)) <$> mWithEnclitic
      ]
-- | Flatten every stage to its (index, type data) pairs -- primary type
-- first, then the parts -- sorted by ascending type index.
getIndexedStageTypeDatas :: [Stage (Json.TypeIndex, TypeData)] -> [(Json.TypeIndex, TypeData)]
getIndexedStageTypeDatas stages =
  List.sortOn fst [ t | Stage primary parts <- stages, t <- primary : parts ]
-- | Number a stage's types consecutively starting at @i@: the primary type
-- gets @i@, each part the next index in order.  Returns the first unused
-- index alongside the indexed stage.
indexStage :: Json.TypeIndex -> Stage TypeData -> (Json.TypeIndex, Stage (Json.TypeIndex, TypeData))
indexStage i (Stage t parts) = (nextIndex, Stage (i, t) indexedParts)
  where
    (nextIndex, indexedParts) = List.mapAccumL assign (i + 1) parts
    assign x p = (x + 1, (x, p))
-- | Index every stage in order, threading the running type index from 0
-- so indexes are unique across all stages.
indexStages :: [Stage TypeData] -> [Stage (Json.TypeIndex, TypeData)]
indexStages = snd . List.mapAccumL indexStage 0
-- | Count a 'Maybe': one for 'Just', zero for 'Nothing'.
maybeToOneOrZero :: Maybe a -> Int
maybeToOneOrZero = maybe 0 (const 1)
-- | Look up each key in order, succeeding only if every key is present.
lookupAll :: Ord a => Map a b -> [a] -> Maybe [b]
lookupAll table = mapM (\key -> Map.lookup key table)
-- | Build the JSON representation of each work: per-word instance data
-- looked up from the (work, word) instance map, paragraph word-groups,
-- and the shared summary type indexes.
getWorks :: [Json.TypeIndex] -> Map WordLocation [(Json.TypeIndex, [Json.ValueIndex])] -> [Work.Indexed [Word.Word Word.Basic a]] -> [Json.Work]
getWorks summaryTypes m works = workInfos
  where
    workInfos = fmap getWorkInfo works
    getWorkInfo (Work.Work (workIndex, workSource, workTitle) workWords) =
      Json.Work workSource workTitle (getWords workIndex workWords) (getWordGroups workWords) summaryTypes
    getWords workIndex = fmap (getWord workIndex)
    -- A word missing from the instance map yields an empty instance list.
    getWord workIndex (Word.Word (i, _) _) = Json.Word . concat . Maybe.maybeToList . Map.lookup (workIndex, i) $ m
    getWordGroups ws = [Json.WordGroup "Paragraphs" (getParagraphs ws)]
-- | Group the words of a work by paragraph index, returning each
-- paragraph's word indexes in ascending paragraph order.
getParagraphs :: [Word.Word Word.Basic a] -> [[Word.Index]]
getParagraphs
  = fmap snd
  . Map.toAscList
  . Lens.over (traverse . traverse) (Lens.view Word.indexLens)
  . Utility.mapGroupBy (Lens.view Word.paragraphIndexLens)
  . fmap Word.getInfo
-- | Replace each word's source info with the composed Unicode code points
-- of its source text.
toComposedWords
  :: WordSurfaceBasic Word.SourceInfo
  -> WordSurfaceBasic [Unicode.Composed]
toComposedWords = Lens.over wordSurfaceLens (Unicode.toComposed . Word.getSource . Word.getSourceInfoWord)
-- | Pair every composed code point with its canonical decomposition, so
-- the mapping itself can be displayed as a function type.
toDecomposedWordPairs
  :: WordSurfaceBasic [Unicode.Composed]
  -> WordSurfaceBasic [(Unicode.Composed, [Unicode.Decomposed])]
toDecomposedWordPairs = Lens.over (wordSurfaceLens . traverse) keepOriginal
  where keepOriginal c = (c, Unicode.decompose' c)
-- | Drop the composed originals, keeping only the concatenated
-- decomposed code points.
toDecomposedWords
  :: WordSurfaceBasic [(Unicode.Composed, [Unicode.Decomposed])]
  -> WordSurfaceBasic [Unicode.Decomposed]
toDecomposedWords = Lens.over wordSurfaceLens (concatMap snd)
-- | Detect and strip an elision marker from each word's decomposed code
-- points, recording the elision result in the word's info.
splitDecomposedElision
  :: WordSurface Word.Basic [Unicode.Decomposed]
  -> WordSurface Word.Elision [Unicode.Decomposed]
splitDecomposedElision = Lens.over (traverse . Work.content . traverse) go
  where
    go :: Word.Word Word.Basic [Unicode.Decomposed] -> Word.Word Word.Elision [Unicode.Decomposed]
    go w = newInfo
      where
        newInfo = Lens.over Word.info (Word.addElisionPair e) newSurface
        newSurface = Lens.set Word.surface as w
        -- e: elision result; as: the surface with any marker removed.
        (e, as) = Elision.split Unicode.decomposed (Word.getSurface w)
-- | Parse runs of decomposed code points into a letter plus its combining
-- marks, keeping the input run paired with each parsed unit.  Fails with
-- a 'Unicode.Error' on unparseable input.
toUnicodeLetterMarksPairs
  :: WordSurface b [Unicode.Decomposed]
  -> Either Unicode.Error (WordSurface b [([Unicode.Decomposed], Marked.Unit Unicode.Letter [Unicode.Mark])])
toUnicodeLetterMarksPairs = wordSurfaceLens Unicode.parseMarkedLetters
-- | Drop the raw decomposed runs, keeping only the parsed letter+marks.
toUnicodeLetterMarks
  :: WordSurface b [([Unicode.Decomposed], Marked.Unit Unicode.Letter [Unicode.Mark])]
  -> WordSurface b [Marked.Unit Unicode.Letter [Unicode.Mark]]
toUnicodeLetterMarks = Lens.over (wordSurfaceLens . traverse) snd
-- | Pair each Unicode letter with its concrete script letter; 'Nothing'
-- if any letter has no concrete counterpart.
toMarkedConcreteLetters
  :: WordSurface b [Marked.Unit Unicode.Letter a]
  -> Maybe (WordSurface b [Marked.Unit (Unicode.Letter, Concrete.Letter) a])
toMarkedConcreteLetters = dupApply (wordSurfaceLens . traverse . Marked.item) Concrete.toMaybeLetter
-- | Pair each Unicode mark with its concrete script mark; 'Nothing' if
-- any mark has no concrete counterpart.
toMarkedConcreteMarks
  :: WordSurface b [Marked.Unit a [Unicode.Mark]]
  -> Maybe (WordSurface b [Marked.Unit a [(Unicode.Mark, Concrete.Mark)]])
toMarkedConcreteMarks = dupApply (wordSurfaceLens . traverse . Marked.marks . traverse) Concrete.toMaybeMark
-- | Split a unit whose letter and marks are (unicode, concrete) pairs into
-- a pair of units: the all-Unicode unit and the all-concrete unit.
toMarkedUnicodeConcretePairs
  :: WordSurface b [Marked.Unit (Unicode.Letter, Concrete.Letter) [(Unicode.Mark, Concrete.Mark)]]
  -> WordSurface b [(Marked.Unit Unicode.Letter [Unicode.Mark], Marked.Unit Concrete.Letter [Concrete.Mark])]
toMarkedUnicodeConcretePairs = Lens.over (wordSurfaceLens . traverse) go
  where
    -- Apply f to the letter and g to every mark of one unit.
    overBoth f g = Lens.over (Marked.marks . traverse) g . Lens.over Marked.item f
    go x = (overBoth fst fst x, overBoth snd snd x)
-- | Pair each concrete mark with its classified kind (total mapping, no
-- failure case).
toMarkedAbstractLetterMarkKindPairs
  :: WordSurface b [Marked.Unit a [Concrete.Mark]]
  -> WordSurface b [Marked.Unit a ([(Concrete.Mark, Mark.Kind)])]
toMarkedAbstractLetterMarkKindPairs = dupApply' (wordSurfaceLens . traverse . Marked.marks . traverse) Mark.toKind
-- | Validate that only the first letter may be capitalized, then move the
-- capitalization flag from the letters onto the word info.
toCapitalWord :: [Work.Indexed [Word.Word Word.Elision [Marked.Unit (t, Abstract.Case, t1) m0]]]
  -> Maybe [Work.Indexed [Word.Word Word.Capital [Marked.Unit (t, t1) m0]]]
toCapitalWord = fmap transferCapitalSurfaceToWord . toCapitalWordSurface
-- | Validate each word's letter cases, producing a word-level
-- 'Word.IsCapitalized' flag and letters stripped of their case component.
toCapitalWordSurface :: [Work.Indexed [Word.Word Word.Elision [Marked.Unit (t, Abstract.Case, t1) m0]]]
  -> Maybe [Work.Indexed [Word.Word Word.Elision (Word.IsCapitalized, [Marked.Unit (t, t1) m0])]]
toCapitalWordSurface = wordSurfaceLens (Abstract.validateIsCapitalized ((\(_,x,_) -> x) . Marked._item) (Lens.over Marked.item (\(x,_,y) -> (x,y))))
-- | Move the capitalization flag from the surface tuple into the word's
-- info, leaving the bare letter list as the surface.
transferCapitalSurfaceToWord :: [Work.Indexed [Word.Word Word.Elision (Word.IsCapitalized, [Marked.Unit (t, t1) m0])]]
  -> [Work.Indexed [Word.Word Word.Capital [Marked.Unit (t, t1) m0]]]
transferCapitalSurfaceToWord = Lens.over (traverse . Work.content . traverse) setCapital
  where
    setCapital (Word.Word wi (c, m)) = Word.Word (Word.addCapital c wi) m
-- | Check that final letter forms occur only in valid (word-final)
-- positions, then drop the final-form component from each letter.
validateFinalForm :: [Work.Indexed [Word.Word a [Marked.Unit (t, Abstract.Final) m0]]]
  -> Maybe [Work.Indexed [Word.Word a [Marked.Unit t m0]]]
validateFinalForm = wordSurfaceLens $ Abstract.validateLetterFinal (Lens.view $ Marked.item . Lens._2) (Lens.over Marked.item fst)
-- | Extract the (syllabic mark, vowel/consonant) pairing from one marked
-- unit; the empty list when the unit carries no syllabic mark.
getSyllabicMarkVowelConsonant :: Marked.Unit Abstract.VowelConsonant (Mark.Group Maybe) -> [(Mark.Syllabic, Abstract.VowelConsonant)]
getSyllabicMarkVowelConsonant (Marked.Unit vc (_, _, maybeSyllabic)) =
  case maybeSyllabic of
    Just syllabic -> [(syllabic, vc)]
    Nothing -> []
| scott-fleischman/greek-grammar | haskell/greek-grammar/src/Text/Greek/IO/Process.hs | mit | 37,505 | 0 | 21 | 4,966 | 10,347 | 5,375 | 4,972 | 443 | 1 |
module Buffer.Backend.MinMax
( minMaxBuffer
, minMaxBufferBy
) where
import Buffer.Internal.Types
-- | A MinMaxBuffer only holds at most two elements at a time, the max and the
-- min, in whatever order they arrive in.
data MinMaxBuffer a
  = Nil        -- ^ no elements inserted yet
  | Single a   -- ^ exactly one element inserted so far
  | Pair a a   -- ^ the two current extremes, kept in arrival order
-- | Put function for orderable types.
-- Delegates to 'putBy' using the natural 'compare' ordering.
put :: Ord a => a -> MinMaxBuffer a -> MinMaxBuffer a
put = putBy compare
-- | Put function for arbitrary types, given a comparison function.
-- A new element only stays if it is a new minimum or a new maximum; the
-- displaced element is dropped. Elements retain their arrival order: if
-- 'a' arrived after 'b' and both survive, the stored order is b-then-a
-- (the newest survivor is always the second field of 'Pair').
putBy :: (a -> a -> Ordering) -> a -> MinMaxBuffer a -> MinMaxBuffer a
putBy _   x Nil        = Single x
putBy _   x (Single a) = Pair a x
putBy cmp x (Pair a b)
  | xLtA /= xLtB = Pair a b   -- x lies strictly between the extremes: drop it
  | xLtA == aLtB = Pair b x   -- x replaces a (a was the inner element)
  | otherwise    = Pair a x   -- x replaces b
  where
    xLtA = cmp x a == LT
    xLtB = cmp x b == LT
    aLtB = cmp a b == LT
-- | Flushes the buffer to a list; a stored pair is emitted with the most
-- recently inserted survivor first.
flush :: MinMaxBuffer a -> [a]
flush Nil        = []
flush (Single a) = [a]
flush (Pair a b) = [b, a]
-- | Alias for the empty buffer, so we don't expose the MinMaxBuffer
-- constructors.
empty :: MinMaxBuffer a
empty = Nil
-- | Wraps the functionality of a MinMaxBuffer in a 'Buffer'.
-- The buffer starts out empty.
minMaxBuffer :: Ord a => Buffer a
minMaxBuffer = Buffer
  { _put = put
  , _flush = flush
  , _empty = empty
  , _buffer = empty
  }
-- | Given a comparison function, wrap the functionality of a MinMaxBuffer in a
-- 'Buffer'. Like 'minMaxBuffer', but for types without an 'Ord' instance.
minMaxBufferBy :: (a -> a -> Ordering) -> Buffer a
minMaxBufferBy cmp = Buffer
  { _put = putBy cmp
  , _flush = flush
  , _empty = empty
  , _buffer = empty
  }
| SilverSylvester/cplot | src/Buffer/Backend/MinMax.hs | mit | 1,856 | 0 | 11 | 484 | 478 | 262 | 216 | -1 | -1 |
module ProjectEuler.Problem36
( problem
) where
import Petbox
import ProjectEuler.Types
-- | Project Euler problem 36 (double-base palindromes), marked solved.
problem :: Problem
problem = pureProblem 36 Solved result
-- | Sum of all numbers below one million that are palindromic in both
-- base 10 and base 2.
result :: Int
result = sum $ filter isDoubleBasePalindrome [1 .. 1000000 - 1]
  where
    -- The ordering is intentional: it's more likely and efficient
    -- to check for decimals before checking binaries.
    isDoubleBasePalindrome x =
      x == numReverseInBase 10 x && x == numReverseInBase 2 x
| Javran/Project-Euler | src/ProjectEuler/Problem36.hs | mit | 398 | 0 | 10 | 88 | 97 | 53 | 44 | 12 | 1 |
{-# LANGUAGE CPP, MultiParamTypeClasses #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module System.Process.Text.Builder where
#if !MIN_VERSION_base(4,8,0)
import Control.Applicative ((<$>))
#endif
import Control.DeepSeq (force)
import qualified Control.Exception as C (evaluate)
import Data.ListLike.IO (hGetContents)
import Data.Text.Lazy (toChunks)
import Data.Text.Lazy.Builder (Builder, fromText)
import Prelude hiding (null)
import System.Process
import System.Process.Common
import System.Exit (ExitCode)
-- | Process I/O via lazy-text 'Builder': output handles are read as lazy
-- 'Text' and each chunk converted with 'fromText'; results are forced
-- with 'force' to avoid lazy-I/O surprises.
instance ListLikeProcessIO Builder Char where
    forceOutput = C.evaluate . force
    readChunks h = (map fromText . toChunks) <$> hGetContents h
-- | Specialized version for backwards compatibility.
-- Shadows 'System.Process.readProcessWithExitCode', fixing the chunk
-- type to 'Builder'.
readProcessWithExitCode
    :: FilePath                        -- ^ command to run
    -> [String]                        -- ^ any arguments
    -> Builder                         -- ^ standard input
    -> IO (ExitCode, Builder, Builder) -- ^ exitcode, stdout, stderr
readProcessWithExitCode = System.Process.Common.readProcessWithExitCode
-- | Like 'readProcessWithExitCode', but takes a full 'CreateProcess'
-- record, allowing the caller to customize working dir, env, etc.
readCreateProcessWithExitCode
    :: CreateProcess                   -- ^ command and arguments to run
    -> Builder                         -- ^ standard input
    -> IO (ExitCode, Builder, Builder) -- ^ exitcode, stdout, stderr
readCreateProcessWithExitCode = System.Process.Common.readCreateProcessWithExitCode
| seereason/process-extras | src/System/Process/Text/Builder.hs | mit | 1,438 | 0 | 9 | 296 | 244 | 152 | 92 | 27 | 1 |
-- | Small demo that binds and prints values of several types.
main :: IO ()
main = do
  let a = "initial"
  putStrLn a
  let b = 1
  let c = 2
  print b >> print c
  let d = True
  print d
  -- Was 'undefined :: Int': printing it raised an exception and aborted
  -- the program before 'f' was ever printed. Use a defined default.
  let e = 0 :: Int
  print e
  let f = "short"
  putStrLn f
| theDrake/haskell-experiments | variables.hs | mit | 185 | 0 | 9 | 68 | 100 | 42 | 58 | 12 | 1 |
module Zipper
( BinTree(BT)
, fromTree
, left
, right
, setLeft
, setRight
, setValue
, toTree
, up
, value
) where
-- | A binary tree node: a value plus optional left and right subtrees.
data BinTree a = BT { btValue :: a
                    , btLeft :: Maybe (BinTree a)
                    , btRight :: Maybe (BinTree a)
                    } deriving (Eq, Show)
data Zipper a = Dummy deriving (Eq, Show)
-- | Build a zipper focused on the root of the given tree. (Exercise stub.)
fromTree :: BinTree a -> Zipper a
fromTree tree = error "You need to implement this function."
-- | Rebuild the complete tree from a zipper, from any focus. (Exercise stub.)
toTree :: Zipper a -> BinTree a
toTree zipper = error "You need to implement this function."
-- | Value at the zipper's current focus. (Exercise stub.)
value :: Zipper a -> a
value zipper = error "You need to implement this function."
-- | Move focus to the left child, if any. (Exercise stub.)
left :: Zipper a -> Maybe (Zipper a)
left zipper = error "You need to implement this function."
-- | Move focus to the right child, if any. (Exercise stub.)
right :: Zipper a -> Maybe (Zipper a)
right zipper = error "You need to implement this function."
-- | Move focus to the parent; 'Nothing' at the root. (Exercise stub.)
up :: Zipper a -> Maybe (Zipper a)
up zipper = error "You need to implement this function."
-- | Replace the value at the focus. (Exercise stub.)
setValue :: a -> Zipper a -> Zipper a
setValue x zipper = error "You need to implement this function."
-- | Replace the left subtree at the focus. (Exercise stub.)
setLeft :: Maybe (BinTree a) -> Zipper a -> Zipper a
setLeft tree zipper = error "You need to implement this function."
-- | Replace the right subtree at the focus. (Exercise stub.)
setRight :: Maybe (BinTree a) -> Zipper a -> Zipper a
setRight tree zipper = error "You need to implement this function."
| exercism/xhaskell | exercises/practice/zipper/src/Zipper.hs | mit | 1,261 | 0 | 11 | 308 | 401 | 203 | 198 | 36 | 1 |
{-# LANGUAGE QuasiQuotes, ScopedTypeVariables #-}
module Handler.NewTrip where
import Import
import Utils.Database
import Utils.Users
import Yesod.Form.Bootstrap3
import qualified Hasql as H
import qualified Data.Text as T
-- | GET handler for the "new trip" page. Admin-only: loads all trip
-- reasons from the database to populate the form's dropdown.
getNewTripR :: Handler Html
getNewTripR = do
    req <- waiRequest
    restrictToAdmins req
    -- Fetch the reason list in one transaction.
    dbres <- liftIO $ do
        conn <- getDbConn
        H.session conn $ H.tx Nothing $ do
            (reasons :: [(Int,T.Text)]) <- H.listEx $ [H.stmt|
                    SELECT *
                    FROM reasons
                    ORDER BY reason ASC
                |]
            return reasons
    case dbres of
        Left err -> error $ show err
        Right reasons -> do
            (widget, enctype) <- generateFormPost $ newTripForm reasons
            defaultLayout $ do
                setTitle $ "New Trip | Brandreth Guestbook"
                $(widgetFile "newtrip")
data TripReason = TripReason Int
-- | Applicative form with a reason dropdown and a submit button.
-- Incoming pairs are (id, label); selectFieldList wants (label, value),
-- hence the swap in 'sreasons'.
newTripAForm :: [(Int,T.Text)] -> AForm Handler TripReason
newTripAForm reasons =
    TripReason <$> areq (selectFieldList sreasons) "Trip Reason" Nothing
               <* bootstrapSubmit ("Submit" :: BootstrapSubmit Text)
    where sreasons = map (\(x,y) -> (y,x)) reasons
-- | Render 'newTripAForm' with basic Bootstrap 3 styling.
newTripForm :: [(Int,T.Text)]
            -> Html
            -> MForm Handler (FormResult TripReason, Widget)
newTripForm reasons = renderBootstrap3 BootstrapBasicForm $ newTripAForm reasons
-- | POST handler for the "new trip" form. Admin-only. Re-fetches the
-- reason list (needed to re-run the form), validates the submission,
-- inserts a trips row, and redirects to the new trip's page.
postNewTripR :: Handler Html
postNewTripR = do
    req <- waiRequest
    restrictToAdmins req
    dbres <- liftIO $ do
        conn <- getDbConn
        H.session conn $ H.tx Nothing $ do
            (reasons :: [(Int,T.Text)]) <- H.listEx $ [H.stmt|
                    SELECT *
                    FROM reasons
                    ORDER BY reason ASC
                |]
            return (conn, reasons)
    case dbres of
        Left err -> error $ show err
        Right (conn, reasons) -> do
            ((result, _), _) <- runFormPost (newTripForm reasons)
            case result of
                FormSuccess (TripReason r) -> do
                    -- Insert and read back the generated trip id.
                    dbres' <- liftIO $ do
                        H.session conn $ H.tx Nothing $ do
                            Identity tid <- H.singleEx $ [H.stmt|
                                    INSERT INTO "trips" (reason_id)
                                    VALUES (?)
                                    RETURNING id
                                |] r
                            return tid
                    case dbres' of
                        Left err -> error $ show err
                        Right tid -> redirect $ TripR tid
                FormMissing -> error $ "No form data sent!"
                FormFailure err -> error $ show err
| dgonyeo/brandskell | Handler/NewTrip.hs | mit | 2,727 | 0 | 27 | 1,093 | 717 | 360 | 357 | 59 | 5 |
{-# LANGUAGE TemplateHaskell, RankNTypes #-}
module Minesweeper.Game where
import Minesweeper.Board
import Minesweeper.Cell
import Control.Lens
import Control.Monad.State
import Data.Maybe
import qualified Data.Vector as Vector (concat, toList)
import System.Random
-- | Whole game state: the board plus how many flags may still be placed.
data Game = Game
    { _board :: Board
    , _remainingFlags :: Int
    }
-- Generates the 'board' and 'remainingFlags' lenses.
makeLenses ''Game
-- | Showing a game shows just its board (flag count is omitted).
instance Show Game where
    show = show . view board
-- | Monad in which all game actions run: 'Game' state over IO.
type GameState = StateT Game IO
-- | Outcome of a player action; 'Error' marks an invalid move.
data Status = Won | Lose | Error | Move deriving (Show, Eq)
-- | Start a new game from the given RNG; the flag budget (10) matches the
-- mine count passed to @initBoard 20 20 10@ — presumably a 20x20 board
-- with 10 mines; confirm initBoard's argument order.
initGame :: StdGen -> Game
initGame rng = Game { _board = initBoard 20 20 10 rng
                    , _remainingFlags = 10
                    }
-- Returns True if the game is in a winning state: every cell passes
-- 'checkCellStatus' (i.e. all mines are flagged).
isWon :: GameState Bool
isWon = do
    boardCells <- use $ board . cells
    -- Flatten the vector-of-rows into a plain cell list.
    let concatCells = Vector.toList $ Vector.concat $ Vector.toList boardCells
    -- 'all' replaces the redundant
    -- 'if all (==True) (map checkCellStatus ...) then True else False'.
    return $ all checkCellStatus concatCells
-- Checks if a cell is in a winnable state: unmined cells always are;
-- mined cells must be flagged. (Same truth table as the original guards:
-- flagged&&mined -> True, not mined -> True, otherwise False.)
checkCellStatus :: Cell -> Bool
checkCellStatus c = not (c ^. mined) || (c ^. flagged)
-- Reveals a cell if it isn't flagged and hasn't been revealed already.
-- Returns 'Lose' on a mine, 'Won' if revealing completes the game,
-- 'Move' for an ordinary reveal, and 'Error' for an invalid target.
revealCell :: Int -> Int -> GameState Status
revealCell x y = do
    r <- isRevealed x y
    f <- isFlagged x y
    if not r && not f then do
        m <- isMined x y
        if m then
            return Lose
        else do
            setCellField revealed True x y
            -- Win check must come after the reveal is recorded.
            won <- isWon
            if won then
                return Won
            else
                return Move
    else
        return Error
-- Toggles the flag on an unrevealed cell. Flagging requires a remaining
-- flag (budget decremented); unflagging refunds one. Returns 'Won' if
-- flagging completes the game, 'Move' on success, 'Error' otherwise
-- (cell already revealed, or no flags left).
toggleFlagCell :: Int -> Int -> GameState Status
toggleFlagCell x y = do
    r <- isRevealed x y
    if not r then do
        f <- isFlagged x y
        if not f then do
            numFlags <- use remainingFlags
            if numFlags > 0 then do
                setCellField flagged True x y
                remainingFlags -= 1
                -- Win check after both the flag and the budget update.
                won <- isWon
                if won then
                    return Won
                else
                    return Move
            else
                return Error
        else do
            setCellField flagged False x y
            remainingFlags += 1
            return Move
    else
        return Error
-- | Whether the cell at (x, y) holds a mine.
isMined :: Int -> Int -> GameState Bool
isMined = getCellField mined
-- | Whether the cell at (x, y) is currently flagged.
isFlagged :: Int -> Int -> GameState Bool
isFlagged = getCellField flagged
-- | Whether the cell at (x, y) has been revealed.
isRevealed :: Int -> Int -> GameState Bool
isRevealed = getCellField revealed
-- | Number of mines adjacent to the cell at (x, y).
getAdjacentMines :: Int -> Int -> GameState Int
getAdjacentMines = getCellField adjacentMines
-- Helper method for getting a record field of a cell in the board.
-- Indexing is row-first: 'element y' picks the row, 'element x' the cell.
-- NOTE(review): 'fromJust' makes this partial — out-of-bounds (x, y)
-- crashes; callers are responsible for passing valid coordinates.
getCellField :: Getter Cell a -> Int -> Int -> GameState a
getCellField getter x y = do
    m <- get
    return $ fromJust $ m ^? board . cells . element y . element x . getter
-- Helper method for setting a record field of a cell in the board.
-- Out-of-bounds coordinates are a silent no-op ('element' traversal).
setCellField :: Setter Cell Cell a b -> b -> Int -> Int -> GameState ()
setCellField setter val x y = combinedSetter .= val
    where combinedSetter = board . cells . element y . element x . setter
| mattdonnelly/CS4012-Minesweeper | src/Minesweeper/Game.hs | mit | 3,261 | 0 | 17 | 1,058 | 916 | 454 | 462 | -1 | -1 |
module Protop.Logic.BuilderSpec (spec) where
import Control.Monad ((>=>))
import Protop.Logic
import Test.Hspec
-- | Top-level spec: runs each builder sub-spec in turn.
spec :: Spec
spec = do
    morMSpec
    prfMSpec
    lamSMSpec
    sgmSMSpec
    lamMSpec
    appMSpec
    sgmMSpec
morMSpec :: Spec
morMSpec = describe "morM" $ do
it "should properly lift objects" $ do
let s = objM >>= varM >>= \x ->
objM >>= varM >>= \y ->
morM x y >>= \f ->
popM >> popM >>
return f
e = evalM s
show e `shouldBe` "(%1 -> %2)"
kind e `shouldBe` MOR
it "should throw an exception when lifting is unsound" $ do
let s = objM >>= varM >>= \x ->
objM >>= varM >>=
(morM x >=> varM) >>= \f ->
popM >>
lftM f >>= \t ->
popM >> popM >>
return t
print (evalM s) `shouldThrow` anyErrorCall
prfMSpec :: Spec
prfMSpec = describe "prfM" $
it "should create a simple proof signature" $ do
let s = objM >>= varM >>= \x ->
objM >>= varM >>= \y ->
morM x y >>= varM >>= \f ->
morM x y >>= varM >>=
(prfM f >=> \p ->
popM >> popM >> popM >> popM >>
return p)
e = evalM s
show e `shouldBe` "(%3 == %4)"
kind e `shouldBe` PRF
lamSMSpec :: Spec
lamSMSpec = describe "lamSM" $
it "should create a simple lambda signature" $ do
let s = objM >>= varM >>= \x ->
morM x x >>= lamSM
e = evalM s
show e `shouldBe` "(\\(%1 :: Ob) -> (%1 -> %1))"
kind e `shouldBe` (LAM OBJ MOR)
sgmSMSpec :: Spec
sgmSMSpec = describe "sgmSM" $
it "should create a simple sigma signature" $ do
let s = objM >>= varM >>= \t ->
objM >>= varM >>= \x ->
morM x t >>= lamSM >>= sgmSM
e = evalM s
show e `shouldBe` "(Ex (%1 :: Ob) (\\(%2 :: Ob) -> (%2 -> %1)))"
kind e `shouldBe` (SGM OBJ (LAM OBJ MOR))
lamMSpec :: Spec
lamMSpec = describe "lamM" $
it "should create a simple lambda entity" $ do
let s = objM >>= varM >>= lamM
e = evalM s
show e `shouldBe` "(\\(%1 :: Ob) -> %1)"
kind e `shouldBe` (LAM OBJ OBJ)
appMSpec :: Spec
appMSpec = describe "appM" $
it "should create a simple application" $ do
let s = objM >>= varM >>= \_ ->
objM >>= varM >>= \_ ->
objM >>= lamSM >>= lamSM >>= varM >>= \p ->
objM >>= varM >>= \x ->
appM p x >>= \px ->
appM px x >>= \pxx ->
popM >> popM >>
return pxx
e = evalM s
show e `shouldBe` "((%1 %2) %2)"
kind e `shouldBe` OBJ
sgmMSpec :: Spec
sgmMSpec = describe "sgmM" $
it "should create a simple sigma pair" $ do
let s = objM >>= varM >>= \x ->
morM x x >>= varM >>= \f ->
objM >>= varM >>= \y ->
morM y x >>= sgmSM >>= \t ->
sgmM t x f >>= \g ->
popM >> popM >>
return g
e = evalM s
show' e `shouldBe` "<%1, %2> :: (Ex (%3 :: Ob) (%3 -> %1))"
kind e `shouldBe` (SGM OBJ MOR)
| brunjlar/protop | test/Protop/Logic/BuilderSpec.hs | mit | 3,401 | 0 | 24 | 1,460 | 1,066 | 534 | 532 | 97 | 1 |
module Handler.AdminSpec (spec) where
import TestImport
-- | Placeholder specs for the admin handlers; each 'error' marks a test
-- that still needs to be written (and fails loudly if run).
spec :: Spec
spec = withApp $ do
    describe "getAdminR" $ do
        error "Spec not implemented: getAdminR"
    describe "postAdminR" $ do
        error "Spec not implemented: postAdminR"
| swamp-agr/carbuyer-advisor | test/Handler/AdminSpec.hs | mit | 253 | 0 | 11 | 62 | 60 | 29 | 31 | 8 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE Rank2Types #-}
{-# LANGUAGE TypeFamilies #-}
{- |
Module: Internal.BuildPure
Description: Helpers for building capnproto messages in pure code.
This module provides some helpers for building capnproto messages and values
in pure code, using the low-level API.
-}
module Internal.BuildPure
( PureBuilder
, createPure
) where
import Control.Monad.Catch (Exception, MonadThrow(..), SomeException)
import Control.Monad.Primitive (PrimMonad(..))
import Control.Monad.ST (ST)
import Capnp.Bits (WordCount)
import Capnp.TraversalLimit (LimitT, MonadLimit, evalLimitT)
import Capnp.Mutability
import Internal.STE
-- | 'PureBuilder' is a monad transformer stack with the instances needed
-- to manipulate mutable messages. @'PureBuilder' s a@ is morally equivalent
-- to @'LimitT' ('CatchT' ('ST' s)) a@
newtype PureBuilder s a = PureBuilder (LimitT (STE SomeException s) a)
    deriving(Functor, Applicative, Monad, MonadThrow, MonadLimit)
-- | Exposes the underlying 'ST'-style primitive state, so mutable-message
-- primitives can run directly inside 'PureBuilder'.
instance PrimMonad (PureBuilder s) where
    type PrimState (PureBuilder s) = s
    primitive = PureBuilder . primitive
-- | Run a 'PureBuilder' in 'ST' under the given traversal limit, capturing
-- any thrown exception as a 'Left'.
runPureBuilder :: WordCount -> PureBuilder s a -> ST s (Either SomeException a)
runPureBuilder limit (PureBuilder m) = steToST $ evalLimitT limit m
-- | @'createPure' limit m@ creates a capnproto value in pure code according
-- to @m@, then freezes it without copying. If @m@ calls 'throwM' then
-- 'createPure' rethrows the exception in the specified monad.
createPure :: (MonadThrow m, MaybeMutable f) => WordCount -> (forall s. PureBuilder s (f ('Mut s))) -> m (f 'Const)
createPure limit m = throwLeft $ createT (runPureBuilder limit m)
  where
    -- Rethrow a 'Left' via 'MonadThrow'; pass 'Right' results through.
    throwLeft :: (Exception e, MonadThrow m) => Either e a -> m a
    throwLeft = either throwM pure
| zenhack/haskell-capnp | lib/Internal/BuildPure.hs | mit | 2,007 | 0 | 15 | 394 | 412 | 228 | 184 | 26 | 2 |
{-# LANGUAGE EmptyDataDecls, ForeignFunctionInterface, JavaScriptFFI,
OverloadedStrings, DeriveDataTypeable
#-}
module JavaScript.Web.Canvas ( Context
, Canvas
, Image
, TextAlign(..)
, TextBaseline(..)
, LineCap(..)
, LineJoin(..)
, Repeat(..)
, Gradient
, Pattern
, create
, unsafeToCanvas
, toCanvas
, getContext
, save
, restore
, scale
, rotate
, translate
, transform
, setTransform
, fill
, fillRule
, stroke
, beginPath
, closePath
, clip
, moveTo
, lineTo
, quadraticCurveTo
, bezierCurveTo
, arc
, arcTo
, rect
, isPointInPath
, fillStyle
, strokeStyle
, globalAlpha
, lineJoin
, lineCap
, lineWidth
, setLineDash
, lineDashOffset
, miterLimit
, fillText
, strokeText
, font
, measureText
, textAlign
, textBaseline
, fillRect
, strokeRect
, clearRect
, drawImage
, width
, setWidth
, height
, setHeight
) where
import Prelude hiding (Left, Right)
import Control.Applicative
import Control.Monad
import Data.Data
import Data.Maybe (fromJust)
import Data.Text (Text)
import Data.Typeable
import GHCJS.Foreign
import GHCJS.Marshal
import GHCJS.Types
import JavaScript.Web.Canvas.Internal
import JavaScript.Object (Object)
import qualified JavaScript.Object as O
import JavaScript.Array (JSArray)
import qualified JavaScript.Array as A
data TextAlign = Start
| End
| Left
| Right
| Center
deriving (Eq, Show, Enum, Data, Typeable)
data TextBaseline = Top
| Hanging
| Middle
| Alphabetic
| Ideographic
| Bottom
deriving (Eq, Show, Enum, Data, Typeable)
data LineJoin = LineJoinBevel
| LineJoinRound
| LineJoinMiter
deriving (Eq, Show, Enum)
data LineCap = LineCapButt
| LineCapRound
| LineCapSquare deriving (Eq, Show, Enum, Data, Typeable)
data Repeat = Repeat
| RepeatX
| RepeatY
| NoRepeat
deriving (Eq, Ord, Show, Enum, Data, Typeable)
unsafeToCanvas :: JSVal -> Canvas
unsafeToCanvas r = Canvas r
{-# INLINE unsafeToCanvas #-}
-- | Safely convert a 'JSVal' to a 'Canvas'.
-- NOTE(review): unimplemented — always calls 'error' (see fixme), so
-- callers expecting 'Nothing' for non-canvas values will crash instead.
toCanvas :: JSVal -> Maybe Canvas
toCanvas x = error "toCanvas" -- fixme
{-# INLINE toCanvas #-}
create :: Int -> Int -> IO Canvas
create = js_create
{-# INLINE create #-}
getContext :: Canvas -> IO Context
getContext c = js_getContext c
{-# INLINE getContext #-}
save :: Context -> IO ()
save ctx = js_save ctx
{-# INLINE save #-}
restore :: Context -> IO ()
restore = js_restore
{-# INLINE restore #-}
transform :: Double -> Double -> Double -> Double -> Double -> Double -> Context -> IO ()
transform = js_transform
{-# INLINE transform #-}
setTransform :: Double -> Double -> Double -> Double -> Double -> Double -> Context -> IO ()
setTransform = js_setTransform
{-# INLINE setTransform #-}
scale :: Double -> Double -> Context -> IO ()
scale x y ctx = js_scale x y ctx
{-# INLINE scale #-}
translate :: Double -> Double -> Context -> IO ()
translate x y ctx = js_translate x y ctx
{-# INLINE translate #-}
rotate :: Double -> Context -> IO ()
rotate r ctx = js_rotate r ctx
{-# INLINE rotate #-}
fill :: Context -> IO ()
fill ctx = js_fill ctx
{-# INLINE fill #-}
fillRule :: JSString -> Context -> IO ()
fillRule rule ctx = js_fill_rule rule ctx
{-# INLINE fillRule #-}
stroke :: Context -> IO ()
stroke = js_stroke
{-# INLINE stroke #-}
beginPath :: Context -> IO ()
beginPath = js_beginPath
{-# INLINE beginPath #-}
closePath :: Context -> IO ()
closePath = js_closePath
{-# INLINE closePath #-}
clip :: Context -> IO ()
clip = js_clip
{-# INLINE clip #-}
moveTo :: Double -> Double -> Context -> IO ()
moveTo = js_moveTo
{-# INLINE moveTo #-}
lineTo :: Double -> Double -> Context -> IO ()
lineTo = js_lineTo
{-# INLINE lineTo #-}
quadraticCurveTo :: Double -> Double -> Double -> Double -> Context -> IO ()
quadraticCurveTo = js_quadraticCurveTo
{-# INLINE quadraticCurveTo #-}
bezierCurveTo :: Double -> Double -> Double -> Double -> Double -> Double -> Context -> IO ()
bezierCurveTo = js_bezierCurveTo
{-# INLINE bezierCurveTo #-}
arc :: Double -> Double -> Double -> Double -> Double -> Bool -> Context -> IO ()
arc a b c d e bl ctx = js_arc a b c d e bl ctx
{-# INLINE arc #-}
arcTo :: Double -> Double -> Double -> Double -> Double -> Context -> IO ()
arcTo = js_arcTo
{-# INLINE arcTo #-}
rect :: Double -> Double -> Double -> Double -> Context -> IO ()
rect = js_rect
{-# INLINE rect #-}
isPointInPath :: Double -> Double -> Context -> IO ()
isPointInPath = js_isPointInPath
{-# INLINE isPointInPath #-}
fillStyle :: Int -> Int -> Int -> Double -> Context -> IO ()
fillStyle = js_fillStyle
{-# INLINE fillStyle #-}
strokeStyle :: Int -> Int -> Int -> Double -> Context -> IO ()
strokeStyle = js_strokeStyle
{-# INLINE strokeStyle #-}
globalAlpha :: Double -> Context -> IO ()
globalAlpha = js_globalAlpha
{-# INLINE globalAlpha #-}
-- | Set the context's line-join style ("bevel", "round" or "miter").
lineJoin :: LineJoin -> Context -> IO ()
lineJoin j ctx = case j of
  LineJoinBevel -> js_lineJoin "bevel" ctx
  LineJoinRound -> js_lineJoin "round" ctx
  LineJoinMiter -> js_lineJoin "miter" ctx
{-# INLINE lineJoin #-}
-- | Set the context's line-cap style ("butt", "round" or "square").
lineCap :: LineCap -> Context -> IO ()
lineCap c ctx = case c of
  LineCapButt   -> js_lineCap "butt" ctx
  LineCapRound  -> js_lineCap "round" ctx
  LineCapSquare -> js_lineCap "square" ctx
{-# INLINE lineCap #-}
miterLimit :: Double -> Context -> IO ()
miterLimit = js_miterLimit
{-# INLINE miterLimit #-}
-- | pass an array of numbers
setLineDash :: JSArray -> Context -> IO ()
setLineDash arr ctx = js_setLineDash arr ctx
{-# INLINE setLineDash #-}
lineDashOffset :: Double -> Context -> IO ()
lineDashOffset = js_lineDashOffset
{-# INLINE lineDashOffset #-}
-- | Set the context's horizontal text alignment.
textAlign :: TextAlign -> Context -> IO ()
textAlign Start  ctx = js_textAlign "start" ctx
textAlign End    ctx = js_textAlign "end" ctx
textAlign Left   ctx = js_textAlign "left" ctx
textAlign Right  ctx = js_textAlign "right" ctx
textAlign Center ctx = js_textAlign "center" ctx
{-# INLINE textAlign #-}
-- | Set the context's text baseline.
textBaseline :: TextBaseline -> Context -> IO ()
textBaseline Top         ctx = js_textBaseline "top" ctx
textBaseline Hanging     ctx = js_textBaseline "hanging" ctx
textBaseline Middle      ctx = js_textBaseline "middle" ctx
textBaseline Alphabetic  ctx = js_textBaseline "alphabetic" ctx
textBaseline Ideographic ctx = js_textBaseline "ideographic" ctx
textBaseline Bottom      ctx = js_textBaseline "bottom" ctx
{-# INLINE textBaseline #-}
lineWidth :: Double -> Context -> IO ()
lineWidth = js_lineWidth
{-# INLINE lineWidth #-}
fillText :: JSString -> Double -> Double -> Context -> IO ()
fillText t x y ctx = js_fillText t x y ctx
{-# INLINE fillText #-}
strokeText :: JSString -> Double -> Double -> Context -> IO ()
strokeText t x y ctx = js_strokeText t x y ctx
{-# INLINE strokeText #-}
font :: JSString -> Context -> IO ()
font f ctx = js_font f ctx
{-# INLINE font #-}
measureText :: JSString -> Context -> IO Double
measureText t ctx = js_measureText t ctx
>>= O.getProp "width"
>>= liftM fromJust . fromJSVal
{-# INLINE measureText #-}
fillRect :: Double -> Double -> Double -> Double -> Context -> IO ()
fillRect = js_fillRect
{-# INLINE fillRect #-}
clearRect :: Double -> Double -> Double -> Double -> Context -> IO ()
clearRect = js_clearRect
{-# INLINE clearRect #-}
strokeRect :: Double -> Double -> Double -> Double -> Context -> IO ()
strokeRect = js_strokeRect
{-# INLINE strokeRect #-}
drawImage :: Image -> Int -> Int -> Int -> Int -> Context -> IO ()
drawImage = js_drawImage
{-# INLINE drawImage #-}
-- | Build a fill/stroke pattern from an image and a tiling mode.
-- NOTE(review): defined but absent from the module's export list —
-- confirm whether it should be exported.
createPattern :: Image -> Repeat -> Context -> IO Pattern
createPattern img Repeat ctx = js_createPattern img "repeat" ctx
createPattern img RepeatX ctx = js_createPattern img "repeat-x" ctx
createPattern img RepeatY ctx = js_createPattern img "repeat-y" ctx
createPattern img NoRepeat ctx = js_createPattern img "no-repeat" ctx
{-# INLINE createPattern #-}
setWidth :: Int -> Canvas -> IO ()
setWidth w c = js_setWidth w c
{-# INLINE setWidth #-}
width :: Canvas -> IO Int
width c = js_width c
{-# INLINE width #-}
setHeight :: Int -> Canvas -> IO ()
setHeight h c = js_setHeight h c
{-# INLINE setHeight #-}
height :: Canvas -> IO Int
height c = js_height c
{-# INLINE height #-}
-- ----------------------------------------------------------------------------
foreign import javascript unsafe "$r = document.createElement('canvas');\
\$r.width = $1;\
\$r.height = $2;"
js_create :: Int -> Int -> IO Canvas
foreign import javascript unsafe "$1.getContext('2d')"
js_getContext :: Canvas -> IO Context
foreign import javascript unsafe "$1.save()"
js_save :: Context -> IO ()
foreign import javascript unsafe "$1.restore()"
js_restore :: Context -> IO ()
foreign import javascript unsafe "$7.transform($1,$2,$3,$4,$5,$6)"
js_transform :: Double -> Double -> Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$7.setTransform($1,$2,$3,$4,$5,$6)"
js_setTransform :: Double -> Double -> Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$3.scale($1,$2)"
js_scale :: Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$3.translate($1,$2)"
js_translate :: Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$2.rotate($1)"
js_rotate :: Double -> Context -> IO ()
foreign import javascript unsafe "$1.fill()"
js_fill :: Context -> IO ()
foreign import javascript unsafe "$2.fill($1)"
js_fill_rule :: JSString -> Context -> IO ()
foreign import javascript unsafe "$1.stroke()"
js_stroke :: Context -> IO ()
foreign import javascript unsafe "$1.beginPath()"
js_beginPath :: Context -> IO ()
foreign import javascript unsafe "$1.closePath()"
js_closePath :: Context -> IO ()
foreign import javascript unsafe "$1.clip()"
js_clip :: Context -> IO ()
foreign import javascript unsafe "$3.moveTo($1,$2)"
js_moveTo :: Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$3.lineTo($1,$2)"
js_lineTo :: Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$5.quadraticCurveTo($1,$2,$3,$4)"
js_quadraticCurveTo :: Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$7.bezierCurveTo($1,$2,$3,$4,$5,$6)"
js_bezierCurveTo :: Double -> Double -> Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$7.arc($1,$2,$3,$4,$5,$6)"
js_arc :: Double -> Double -> Double -> Double -> Double -> Bool -> Context -> IO ()
foreign import javascript unsafe "$6.arcTo($1,$2,$3,$4,$5)"
js_arcTo :: Double -> Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$5.rect($1,$2,$3,$4)"
js_rect :: Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$3.isPointInPath($1,$2)"
js_isPointInPath :: Double -> Double -> Context -> IO ()
foreign import javascript unsafe
"$5.fillStyle = 'rgba(' + $1 + ',' + $2 + ',' + $3 + ',' + $4 + ')'"
js_fillStyle :: Int -> Int -> Int -> Double -> Context -> IO ()
foreign import javascript unsafe
"$5.strokeStyle = 'rgba(' + $1 + ',' + $2 + ',' + $3 + ',' + $4 + ')'"
js_strokeStyle :: Int -> Int -> Int -> Double -> Context -> IO ()
foreign import javascript unsafe "$2.globalAlpha = $1"
js_globalAlpha :: Double -> Context -> IO ()
foreign import javascript unsafe
"$2.lineJoin = $1"
js_lineJoin :: JSString -> Context -> IO ()
foreign import javascript unsafe "$2.lineCap = $1"
js_lineCap :: JSString -> Context -> IO ()
foreign import javascript unsafe "$2.miterLimit = $1"
js_miterLimit :: Double -> Context -> IO ()
foreign import javascript unsafe "$2.setLineDash($1)"
js_setLineDash :: JSArray -> Context -> IO ()
foreign import javascript unsafe "$2.lineDashOffset = $1"
js_lineDashOffset :: Double -> Context -> IO ()
foreign import javascript unsafe "$2.font = $1"
js_font :: JSString -> Context -> IO ()
foreign import javascript unsafe "$2.textAlign = $1"
js_textAlign :: JSString -> Context -> IO ()
foreign import javascript unsafe "$2.textBaseline = $1"
js_textBaseline :: JSString -> Context -> IO ()
foreign import javascript unsafe "$2.lineWidth = $1"
js_lineWidth :: Double -> Context -> IO ()
foreign import javascript unsafe "$4.fillText($1,$2,$3)"
js_fillText :: JSString -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$4.strokeText($1,$2,$3)"
js_strokeText :: JSString -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$2.measureText($1)"
js_measureText :: JSString -> Context -> IO Object
foreign import javascript unsafe "$5.fillRect($1,$2,$3,$4)"
js_fillRect :: Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$5.clearRect($1,$2,$3,$4)"
js_clearRect :: Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$5.strokeRect($1,$2,$3,$4)"
js_strokeRect :: Double -> Double -> Double -> Double -> Context -> IO ()
foreign import javascript unsafe "$6.drawImage($1,$2,$3,$4,$5)"
js_drawImage :: Image -> Int -> Int -> Int -> Int -> Context -> IO ()
foreign import javascript unsafe "$3.createPattern($1,$2)"
js_createPattern :: Image -> JSString -> Context -> IO Pattern
foreign import javascript unsafe "$1.width"
js_width :: Canvas -> IO Int
foreign import javascript unsafe "$1.height"
js_height :: Canvas -> IO Int
foreign import javascript unsafe "$2.width = $1;"
js_setWidth :: Int -> Canvas -> IO ()
foreign import javascript unsafe "$2.height = $1;"
js_setHeight :: Int -> Canvas -> IO ()
| ghcjs/ghcjs-base | JavaScript/Web/Canvas.hs | mit | 15,509 | 192 | 14 | 4,548 | 3,872 | 2,018 | 1,854 | 361 | 6 |
{-# LANGUAGE BangPatterns, DataKinds, DeriveDataTypeable, FlexibleInstances, MultiParamTypeClasses #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
module Hadoop.Protos.ContainerManagementProtocol.ContainerManagementProtocolService
(ContainerManagementProtocolService, containerManagementProtocolService, StartContainers, StopContainers,
GetContainerStatuses, startContainers, stopContainers, getContainerStatuses)
where
import Prelude ((+), (/), (.))
import qualified Prelude as Prelude'
import qualified Data.Typeable as Prelude'
import qualified Data.Data as Prelude'
import qualified Text.ProtocolBuffers.Header as P'
import qualified Hadoop.Protos.YarnServiceProtos.StartContainersRequestProto as YarnServiceProtos (StartContainersRequestProto)
import qualified Hadoop.Protos.YarnServiceProtos.StopContainersRequestProto as YarnServiceProtos (StopContainersRequestProto)
import qualified Hadoop.Protos.YarnServiceProtos.GetContainerStatusesRequestProto as YarnServiceProtos
(GetContainerStatusesRequestProto)
import qualified Hadoop.Protos.YarnServiceProtos.StartContainersResponseProto as YarnServiceProtos (StartContainersResponseProto)
import qualified Hadoop.Protos.YarnServiceProtos.StopContainersResponseProto as YarnServiceProtos (StopContainersResponseProto)
import qualified Hadoop.Protos.YarnServiceProtos.GetContainerStatusesResponseProto as YarnServiceProtos
(GetContainerStatusesResponseProto)
type ContainerManagementProtocolService = P'.Service '[StartContainers, StopContainers, GetContainerStatuses]
-- | Value-level handle for the service description.
-- NOTE(review): this module appears to be protocol-buffers-generated
-- output; prefer regenerating from the .proto over hand edits.
containerManagementProtocolService :: ContainerManagementProtocolService
containerManagementProtocolService = P'.Service
type StartContainers =
P'.Method ".hadoop.yarn.ContainerManagementProtocolService.startContainers" YarnServiceProtos.StartContainersRequestProto
YarnServiceProtos.StartContainersResponseProto
type StopContainers =
P'.Method ".hadoop.yarn.ContainerManagementProtocolService.stopContainers" YarnServiceProtos.StopContainersRequestProto
YarnServiceProtos.StopContainersResponseProto
type GetContainerStatuses =
P'.Method ".hadoop.yarn.ContainerManagementProtocolService.getContainerStatuses"
YarnServiceProtos.GetContainerStatusesRequestProto
YarnServiceProtos.GetContainerStatusesResponseProto
startContainers :: StartContainers
startContainers = P'.Method
stopContainers :: StopContainers
stopContainers = P'.Method
getContainerStatuses :: GetContainerStatuses
getContainerStatuses = P'.Method | alexbiehl/hoop | hadoop-protos/src/Hadoop/Protos/ContainerManagementProtocol/ContainerManagementProtocolService.hs | mit | 2,526 | 0 | 7 | 226 | 304 | 201 | 103 | 37 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FlexibleContexts, FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
-----------------------------------------------------------------------------
-- |
-- Module : Reactive.Banana.JsHs.Types
-- Copyright : (c) Artem Chirkin
-- License : MIT
-- Maintainer : Artem Chirkin <[email protected]>
-- Stability : experimental
--
-----------------------------------------------------------------------------
module Reactive.Banana.JsHs.Types
( Time (..), getTime
, HTMLElement (..)
-- * Events
, PointerEvent (..)
, PointerEventValue (..), pointers, eventType, button
, ResizeEvent (..)
, WheelEvent (..)
, ElementClick (..)
-- * Modifiers
, ModKey (..)
-- * Positions
, Coords2D (..), coordX, coordY, unpackCoords2D
) where
import qualified JsHs.Array as JS
import JsHs.Types
import JsHs.LikeJS.Class
-- | Opaque wrapper around a raw JavaScript DOM element reference.
newtype HTMLElement = HTMLElement JSVal
instance LikeJS "HTMLElement" HTMLElement
-- | Current time in milliseconds
-- (backed by the browser's @performance.now()@ high-resolution clock).
foreign import javascript unsafe "performance.now()"
    getTime :: IO Time
-- | Unified representation for MouseEvent and TouchEvent in JS.
-- One constructor per pointer lifecycle phase; each carries the raw
-- JS-side event value.
data PointerEvent
  = PointerUp PointerEventValue
  | PointerDown PointerEventValue
  | PointerMove PointerEventValue
  | PointerCancel PointerEventValue
  | PointerClick PointerEventValue
-- | Mouse wheel event (direction only; no delta magnitude is kept).
data WheelEvent = WheelUp | WheelDown
  deriving (Eq,Show,Ord)
-- | Wheel direction is marshalled as a JS number: up is @1@, down is
-- @-1@.  Decoding maps any non-negative number to 'WheelUp' and any
-- negative number to 'WheelDown'.
instance LikeJS "Number" WheelEvent where
  asJSVal WheelUp   = asJSVal (1 :: Double)
  asJSVal WheelDown = asJSVal (-1 :: Double)
  asLikeJS jsv =
    let delta = asLikeJS jsv :: Double
    in if delta >= 0 then WheelUp else WheelDown
-- | Element-resize notification carrying the new (x, y) extent.
newtype ResizeEvent = ResizeEvent Coords2D
instance LikeJS "Array" ResizeEvent
-- | Use JavaScript ReactiveBanana.PointerEventValue
newtype PointerEventValue = PointerEventValue JSVal
instance LikeJS "PointerEvent" PointerEventValue
-- | Click on an element
newtype ElementClick = ElementClick HTMLElement
instance LikeJS "HTMLElement" ElementClick
-- | Time of events is Double (milliseconds; see 'getTime').
newtype Time = Time Double
  deriving (Eq,Ord,Show,Num,Real,RealFrac,RealFloat,Fractional,Floating)
instance LikeJS "Number" Time where
  asLikeJS = Time . asLikeJS
  asJSVal (Time v) = asJSVal v
-- | Positions of all pointers
foreign import javascript unsafe "$1.pointers" pointers :: PointerEventValue -> JS.Array Coords2D
-- | JS type of event
foreign import javascript unsafe "$1.type" eventType :: PointerEventValue -> JSString
-- | Id of a mouse button pressed (or zero for touches)
foreign import javascript unsafe "$1.button" button :: PointerEventValue -> Int
-- | A modifier key. A list of modifier keys is reported for every key press
-- and mouse click.
data ModKey = Shift | Ctrl | Alt | Meta
  deriving (Show, Read, Ord, Eq, Bounded, Enum)
-- | Javascript object containing x and y screen coordinates
-- (stored JS-side as a two-element array, hence the "Array" instance).
newtype Coords2D = Coords2D JSVal
instance LikeJS "Array" Coords2D
-- | Get pointer x coordinate
foreign import javascript unsafe "$1[0]" coordX :: Coords2D -> Double
-- | Get pointer y coordinate
foreign import javascript unsafe "$1[1]" coordY :: Coords2D -> Double
-- | Get pointer coordinates
foreign import javascript unsafe "$r1=$1[0];$r2=$1[1];" unpackCoords2D :: Coords2D -> (Double, Double)
| mb21/qua-kit | libs/hs/reactive-banana-ghcjs/src/Reactive/Banana/JsHs/Types.hs | mit | 3,335 | 24 | 10 | 561 | 576 | 360 | 216 | 55 | 0 |
import Data.List
import Data.Char
import Data.Maybe
import Data.Either
import Data.List.Utils
-- | Entry point: read the puzzle input, run the bot simulation and
-- print the product of the values held by the output bins.
main = do
  contents <- readFile "day10input.txt"
  let result = compute $ lines contents
  print result
-- Domain aliases.  Output bins are modelled as bots whose IDs come
-- from 'outputs' (1000 + bin number, see 'parseInstr').
type Value = Int
type BotID = Int
type Bot = (BotID, [Value])
-- | (giving bot, low-value receiver, high-value receiver) — see 'share'.
type GiveInstruction = (BotID, BotID, BotID)
-- | (receiving bot, value to deliver).
type TakeInstruction = (BotID, Value)
type State = [Bot]
initState :: State
initState = []
-- | Apply all initial "value goes to bot" instructions, then run give
-- instructions until every output bin is filled, and multiply the
-- output values together.
-- NOTE(review): strict foldl' would avoid thunk build-up on large inputs.
compute :: [String] -> Int
compute input = multiply $ shareAndCheck shareInstrs initialReceive
    where (shareInstrs, receiveInstrs) = parseInput input
          initialReceive = foldl receive initState receiveInstrs
-- | Product of the single values sitting in output bins.  Entries that
-- are not output bins, or that do not hold exactly one value, simply
-- contribute a factor of 1 (the comprehension pattern skips them).
multiply :: State -> Int
multiply state = product [ v | (bot, [v]) <- state, isOutput bot ]
-- | IDs representing output bins 0..2 ('parseInstr' rewrites
-- "output N" to bot 100N).
outputs :: [BotID]
outputs = [1000,1001,1002]
-- | Is this ID an output bin rather than a real bot?
isOutput :: BotID -> Bool
isOutput b = b `elem` outputs
-- | Repeatedly apply give instructions until all output bins hold a
-- value, then return the final state.  Loops forever if that state is
-- unreachable from the input.
shareAndCheck :: [GiveInstruction] -> State -> State
shareAndCheck instructions state
  | isTarget state $ length outputs = state
  | otherwise = shareAndCheck instructions $ share state instructions
-- | @isTarget state n@ is True when at least @n@ output bins hold
-- exactly one value (counts down @n@ while walking the state list).
isTarget :: State -> Int -> Bool
isTarget _ 0 = True
isTarget [] _ = False
isTarget ((b, [x]) : bs) n
  | isOutput b = isTarget bs (n-1)
  | otherwise = isTarget bs n
isTarget (_ : bs) n = isTarget bs n
-- | Perform one "give" step: find a bot holding two values, empty its
-- hand, and deliver its low value to @r1@ and its high value to @r2@
-- as dictated by the matching give instruction.
--
-- Replaces the previous bare 'fromJust' calls with explicit error
-- messages so a missing two-value bot or instruction is diagnosable
-- instead of failing with "fromJust: Nothing".
share :: State -> [GiveInstruction] -> State
share state instructions = receive (receive (empty state b) (r1, low)) (r2, high)
    where (b, held) = fromMaybe (error "share: no bot holds exactly two values")
                        (find (\(_, vs) -> length vs == 2) state)
          (_, r1, r2) = fromMaybe (error ("share: no give instruction for bot " ++ show b))
                          (find (\(s,_,_) -> s == b) instructions)
          [low, high] = sort held
-- | Empty the hand of the bot with the given ID, leaving all other
-- entries untouched.
--
-- Previously the function was non-exhaustive: looking up an absent bot
-- died with an anonymous pattern-match failure.  It now fails with a
-- descriptive error naming the missing bot.
empty :: State -> BotID -> State
empty [] b' = error ("empty: bot " ++ show b' ++ " not found in state")
empty ((b, l) : bs) b'
  | b == b' = (b, []) : bs
  | otherwise = (b, l) : empty bs b'
-- | Deliver a value to a bot: prepend it to the hand of an existing
-- entry, or append a brand-new entry when the bot is unseen.
receive :: State -> TakeInstruction -> State
receive state (bid, val) = go state
  where
    go [] = [(bid, [val])]
    go ((b, held) : rest)
      | b == bid = (b, val : held) : rest
      | otherwise = (b, held) : go rest
--- Input parsing ---
-- | Parse every input line, splitting the results into give
-- instructions ('Left') and initial take instructions ('Right').
parseInput :: [String] -> ([GiveInstruction],[TakeInstruction])
parseInput input = (lefts parsedInput, rights parsedInput)
    where parsedInput = map parseInstr input
-- | Parse one instruction line.  Three numbers mean a give instruction
-- (giver, low receiver, high receiver); two numbers mean an initial
-- "value X goes to bot Y" instruction.
--
-- The case expression was non-exhaustive; a malformed line now fails
-- with an error that echoes the offending input instead of an
-- anonymous pattern-match failure.
parseInstr :: String -> Either GiveInstruction TakeInstruction
parseInstr instr = case numbers of
    [x, y, z] -> Left (read x, read y, read z)
    [x, y]    -> Right (read y, read x)
    _         -> error ("parseInstr: unrecognised instruction: " ++ instr)
    where numbers = filter isNumStr $ words $ replace "output " "bot 100" instr -- Replace output with "bot 100..." to differenciate
-- | True when every character is a digit or a minus sign (vacuously
-- true for the empty string).
isNumStr :: String -> Bool
isNumStr = all (\c -> isDigit c || c == '-')
{-# LANGUAGE DeriveDataTypeable, FlexibleContexts, FlexibleInstances,
FunctionalDependencies, GeneralizedNewtypeDeriving, MultiParamTypeClasses,
NoMonomorphismRestriction, TypeSynonymInstances #-}
module Yi.Snippets where
import Prelude ()
import Yi.Prelude
import Control.Arrow
import Control.Monad.RWS hiding (mapM, mapM_, forM, forM_, sequence)
import Data.List hiding (foldl', find, elem, concat, concatMap)
import Data.Char (isSpace)
import Data.Maybe (fromJust, isJust)
import Yi.Buffer
import Yi.Dynamic
import Yi.Keymap
import Yi.Keymap.Keys
import Yi.Keymap.Vim (savingInsertCharB)
import Yi.TextCompletion
-- | Snippet commands run in an RWST over 'BufferM': the reader holds
-- (current line, current indentation), the writer accumulates the
-- marks created while the snippet text is inserted.
type SnippetCmd = RWST (Int, Int) [MarkInfo] () BufferM
-- | User-facing placeholder descriptions, indexed by a user-chosen Int.
data SnippetMark = SimpleMark !Int
                 | ValuedMark !Int String
                 | DependentMark !Int
-- | Buffer-side representation of a placeholder: a simple mark records
-- only its start, valued/dependent marks span a start..end region.
data MarkInfo = SimpleMarkInfo { userIndex :: !Int, startMark :: !Mark }
              | ValuedMarkInfo { userIndex :: !Int, startMark :: !Mark, endMark :: !Mark }
              | DependentMarkInfo { userIndex :: !Int, startMark :: !Mark, endMark :: !Mark }
  deriving (Eq, Show)
-- | Buffer-local queue of marks still to visit (see 'nextBufferMark').
newtype BufferMarks = BufferMarks { bufferMarks :: [MarkInfo] }
  deriving (Eq, Show, Monoid, Typeable)
-- | Buffer-local groups of marks that mirror each other's text.
newtype DependentMarks = DependentMarks { marks :: [[MarkInfo]] }
  deriving (Eq, Show, Monoid, Typeable)
instance Initializable BufferMarks where
  initial = BufferMarks []
instance Initializable DependentMarks where
  initial = DependentMarks []
-- Marks are ordered by the user-assigned placeholder index only.
instance Ord MarkInfo where
  a `compare` b = (userIndex a) `compare` (userIndex b)
-- Convenience constructors used when writing snippet definitions.
cursor = SimpleMark
cursorWith = ValuedMark
dep = DependentMark
isDependentMark (SimpleMarkInfo _ _) = False
isDependentMark (ValuedMarkInfo _ _ _) = False
isDependentMark (DependentMarkInfo _ _ _) = True
-- All buffer 'Mark's owned by a placeholder (one or two).
bufferMarkers (SimpleMarkInfo _ s) = [s]
bufferMarkers m = [startMark m, endMark m]
-- used to translate a datatype into a snippet cmd for
-- freely combining data with '&'
class MkSnippetCmd a b | a -> b where
  mkSnippetCmd :: a -> SnippetCmd b
instance MkSnippetCmd String () where
  mkSnippetCmd = text
instance MkSnippetCmd (SnippetCmd a) a where
  mkSnippetCmd = id
-- mkSnippetCmd for 'cursor...'-functions: each placeholder drops one
-- or two buffer marks at point and logs the resulting MarkInfo.
instance MkSnippetCmd SnippetMark () where
  mkSnippetCmd (SimpleMark i) = do
      mk <- mkMark
      tell [SimpleMarkInfo i mk]
  mkSnippetCmd (ValuedMark i str) = do
      start <- mkMark
      lift $ insertN str
      end <- mkMark
      tell [ValuedMarkInfo i start end]
  mkSnippetCmd (DependentMark i) = do
      start <- mkMark
      end <- mkMark
      tell [DependentMarkInfo i start end]
-- create a mark at current position
mkMark = lift $ do p <- pointB
                   newMarkB $ MarkValue p Backward
-- Indentation support has been temporarily removed
-- | Insert a (possibly multi-line) string at point, re-indenting each
-- subsequent line to the snippet's base indentation and expanding tabs
-- when the buffer's indent settings request it.
--
-- Fix: the old @lines'@ called 'last' on its argument, so @text ""@
-- crashed; the empty string now inserts nothing.
text :: String -> SnippetCmd ()
text txt = do
    (_, indent) <- ask
    indentSettings <- lift indentSettingsB
    lift . foldl' (>>) (return ()) .
        intersperse (newlineB >> indentToB indent) .
        map (if expandTabs indentSettings
               then insertN . expand indentSettings ""
               else insertN) $ lines' txt
  where
    -- Like 'lines', but keeps a trailing empty line when the text ends
    -- in a newline, and is total on the empty string.
    lines' [] = []
    lines' txt = if last txt == '\n' -- TODO: not very efficient yet
                   then lines txt ++ [""]
                   else lines txt
    -- Expand each tab into the configured number of spaces
    -- (accumulator is built reversed, hence the final 'reverse').
    expand _ str [] = reverse str
    expand indentSettings str (s:rst)
      | s == '\t' = expand indentSettings ((replicate (tabSize indentSettings) ' ') ++ str) rst
      | otherwise = expand indentSettings (s:str) rst
-- unfortunatelly data converted to snippets are no monads,
-- but & is very similar to >> abd &> is similar to >>=,
-- since SnippetCmd's can be used monadic
infixr 5 &
-- | Sequence two snippet fragments, keeping the second one's result
-- (the snippet analogue of '>>').
(&) :: (MkSnippetCmd a any , MkSnippetCmd b c) => a -> b -> SnippetCmd c
str & rst = mkSnippetCmd str >> mkSnippetCmd rst
-- | Feed the first fragment's result into the second (analogue of '>>=').
(&>) :: (MkSnippetCmd a b, MkSnippetCmd c d) => a -> (b -> c) -> SnippetCmd d
str &> rst = mkSnippetCmd str >>= mkSnippetCmd . rst
-- | Render a snippet at point, register the marks it produced in the
-- buffer-local 'BufferMarks' / 'DependentMarks' state, and move to the
-- first placeholder.  @deleteLast@ controls whether visited marks are
-- deleted rather than cycled (passed through to 'moveToNextBufferMark').
runSnippet :: Bool -> SnippetCmd a -> BufferM a
runSnippet deleteLast s = do
    line <- lineOf =<< pointB
    indent <- indentOfCurrentPosB
    (a, markInfo) <- evalRWST s (line, indent) ()
    unless (null markInfo) $ do
        let newMarks = sort $ filter (not . isDependentMark) markInfo
        let newDepMarks = filter (not . len1) $
                            groupBy belongTogether $
                              sort markInfo
        modA bufferDynamicValueA ((BufferMarks newMarks) `mappend`)
        unless (null newDepMarks) $ do
            modA bufferDynamicValueA ((DependentMarks newDepMarks) `mappend`)
        moveToNextBufferMark deleteLast
    return a
    where
        len1 (x:[]) = True
        len1 _      = False
        -- Marks with the same user index form one dependency group.
        belongTogether a b = userIndex a == userIndex b
-- | Propagate buffer edits to every mark group they touched.
updateUpdatedMarks :: [Update] -> BufferM ()
updateUpdatedMarks upds = findEditedMarks upds >>=
                          mapM_ updateDependents
-- | All dependent marks whose region was hit by one of the updates
-- (deduplicated across updates).
findEditedMarks :: [Update] -> BufferM [MarkInfo]
findEditedMarks upds = sequence (map findEditedMarks' upds) >>=
                       return . nub . concat
  where
    findEditedMarks' :: Update -> BufferM [MarkInfo]
    findEditedMarks' upd = do
      let p = updatePoint upd
      ms <- return . nub . concat . marks =<< getA bufferDynamicValueA
      ms <- forM ms $ \m ->do
              r <- adjMarkRegion m
              -- deletes may land just outside the region, hence nearRegion
              return $ if (updateIsDelete upd && p `nearRegion` r)
                          || p `inRegion` r
                         then Just m
                         else Nothing
      return . map fromJust . filter isJust $ ms
-- | Every mark sharing a dependency group with the given mark, the
-- mark itself excluded.  Yields the empty list when the mark belongs
-- to no group.
dependentSiblings :: MarkInfo -> [[MarkInfo]] -> [MarkInfo]
dependentSiblings mark = maybe [] (filter (/= mark)) . find (mark `elem`)
-- | Copy a mark's text to all of its dependency-group siblings.
updateDependents :: MarkInfo -> BufferM ()
updateDependents m = getA bufferDynamicValueA >>= updateDependents' m . marks
updateDependents' :: MarkInfo -> [[MarkInfo]] -> BufferM ()
updateDependents' mark deps =
  case dependentSiblings mark deps of
    [] -> return ()
    deps -> do
      txt <- markText mark
      forM_ deps $ \d -> do
        dTxt <- markText d
        -- only rewrite siblings whose text actually differs
        when (txt /= dTxt) $
          setMarkText txt d
-- | The buffer text currently covered by a mark's region.
markText :: MarkInfo -> BufferM String
markText m = markRegion m >>= readRegionB
-- | Overwrite the text covered by a mark.  For simple marks the word
-- at the mark is replaced (or text inserted when sitting on
-- whitespace); for spanning marks the start..end region is rewritten
-- and a collapsed region's end mark is pushed past the new text.
setMarkText :: String -> MarkInfo -> BufferM ()
setMarkText txt (SimpleMarkInfo _ start) = do
    p <- getMarkPointB start
    c <- readAtB p
    if (isSpace c)
      then insertNAt txt p
      else do r <- regionOfPartNonEmptyAtB unitViWordOnLine Forward p
              modifyRegionClever (const txt) r
setMarkText txt mi = do
    start <- getMarkPointB $ startMark mi
    end <- getMarkPointB $ endMark mi
    let r = mkRegion start end
    modifyRegionClever (const txt) r
    when (start == end) $
      setMarkPointB (endMark mi) (end + (Point $ length txt))
-- Run an action on the word-region at a simple mark; on whitespace the
-- mark denotes an empty region and the action is skipped.
withSimpleRegion (SimpleMarkInfo _ s) f = do
    p <- getMarkPointB s
    c <- readAtB p
    if isSpace c
      then return $ mkRegion p p -- return empty region
      else f =<< regionOfPartNonEmptyAtB unitViWordOnLine Forward p
-- Region covered by a mark; a simple mark's word-region is clipped so
-- it does not run into any overlapping mark's region.
markRegion m@(SimpleMarkInfo _ s) = withSimpleRegion m $ \r -> do
    os  <- findOverlappingMarksWith safeMarkRegion concat True r m
    rOs <- mapM safeMarkRegion os
    return . mkRegion (regionStart r) $ foldl' minEnd (regionEnd r) rOs
  where
    minEnd end r = if regionEnd r < end
                     then end
                     else min end $ regionStart r
markRegion m = liftM2 mkRegion
                  (getMarkPointB $ startMark m)
                  (getMarkPointB $ endMark m)
-- Like 'markRegion' but without overlap clipping (used to avoid the
-- mutual recursion between markRegion and overlap detection).
safeMarkRegion m@(SimpleMarkInfo _ _) = withSimpleRegion m return
safeMarkRegion m = markRegion m
-- Region of a mark, with the end extended over an adjoining word and
-- the start moved past leading whitespace; extensions that would
-- create overlaps with other marks are rolled back.
adjMarkRegion s@(SimpleMarkInfo _ _) = markRegion s
adjMarkRegion m = do
    s <- getMarkPointB $ startMark m
    e <- getMarkPointB $ endMark m
    c <- readAtB e
    when (isWordChar c) $ do adjustEnding e
                             repairOverlappings e
    e <- getMarkPointB $ endMark m
    s <- adjustStart s e
    return $ mkRegion s e
  where
    adjustEnding end = do
        r' <- regionOfPartNonEmptyAtB unitViWordOnLine Forward end
        setMarkPointB (endMark m) (regionEnd r')
    adjustStart s e = do
        txt <- readRegionB (mkRegion s e)
        let sP = s + (Point . length $ takeWhile isSpace txt)
        when (sP > s) $ do
            setMarkPointB (startMark m) sP
        return sP
    -- test if we generated overlappings and repair
    repairOverlappings origEnd = do overlappings <- allOverlappingMarks True m
                                    when (not $ null overlappings) $
                                        setMarkPointB (endMark m) origEnd
-- | Marks (other than @m@ itself) whose region overlaps @r@.
-- @fMarkRegion@ computes each candidate's region, @flattenMarks@
-- selects which dependency groups to search, @border@ is forwarded to
-- 'regionsOverlap'.
findOverlappingMarksWith :: (MarkInfo -> BufferM Region) ->
                            ([[MarkInfo]] -> [MarkInfo]) -> Bool -> Region ->
                            MarkInfo -> BufferM [MarkInfo]
findOverlappingMarksWith fMarkRegion flattenMarks border r m =
    getA bufferDynamicValueA >>=
    return . filter (not . (m==)) . flattenMarks . marks >>=
    filterM (liftM (regionsOverlap border r) . fMarkRegion)
findOverlappingMarks :: ([[MarkInfo]] -> [MarkInfo]) -> Bool -> Region ->
                        MarkInfo -> BufferM [MarkInfo]
findOverlappingMarks = findOverlappingMarksWith markRegion
regionsOverlappingMarks :: Bool -> Region -> MarkInfo -> BufferM [MarkInfo]
regionsOverlappingMarks = findOverlappingMarks concat
-- | Overlapping marks; when @belongingTogether@ only the mark's own
-- dependency group is searched.
overlappingMarks :: Bool -> Bool -> MarkInfo -> BufferM [MarkInfo]
overlappingMarks border belongingTogether mark = do
    r <- markRegion mark
    findOverlappingMarks (if belongingTogether
                            then dependentSiblings mark
                            else concat)
                         border
                         r
                         mark
allOverlappingMarks :: Bool -> MarkInfo -> BufferM [MarkInfo]
allOverlappingMarks border = overlappingMarks border False
dependentOverlappingMarks :: Bool -> MarkInfo -> BufferM [MarkInfo]
dependentOverlappingMarks border = overlappingMarks border True
-- | Pop the next mark to visit.  With @deleteLast@ the mark is removed
-- from the queue; otherwise it is rotated to the back so marks cycle.
nextBufferMark :: Bool -> BufferM (Maybe MarkInfo)
nextBufferMark deleteLast = do
    BufferMarks ms <- getA bufferDynamicValueA
    if (null ms)
      then return Nothing
      else do putA bufferDynamicValueA . BufferMarks . (if deleteLast then (const $ tail ms) else (tail ms ++)) $ [head ms]
              return . Just $ head ms
-- Is this buffer Mark owned by some dependent-mark group?
isDependentMarker bMark = do
    DependentMarks ms <- getA bufferDynamicValueA
    return . elem bMark . concatMap bufferMarkers . concat $ ms
-- Delete a buffer mark unless a dependent group still needs it.
safeDeleteMarkB m = do
    b <- isDependentMarker m
    unless b (deleteMarkB m)
-- | Jump to the next placeholder: simple marks just move point, valued
-- marks additionally delete their placeholder text first.
moveToNextBufferMark :: Bool -> BufferM ()
moveToNextBufferMark deleteLast = do
    p <- nextBufferMark deleteLast
    case p of
      Just p  -> mv p
      Nothing -> return ()
  where
    mv (SimpleMarkInfo _ m)   = do
        moveTo =<< getMarkPointB m
        when deleteLast $ safeDeleteMarkB m
    mv (ValuedMarkInfo _ s e) = do
        sp <- getMarkPointB s
        ep <- getMarkPointB e
        deleteRegionB (mkRegion sp ep)
        moveTo sp
        when deleteLast $ do
            safeDeleteMarkB s
            safeDeleteMarkB e
-- Keymap support
-- | A SuperTab extension maps the word before point to an optional
-- buffer action (e.g. expanding it as a snippet).
newtype SupertabExt = Supertab (String -> Maybe (BufferM ()))
-- Extensions compose left-biased: the first one that can expand wins.
instance Monoid SupertabExt where
  mempty = Supertab $ const Nothing
  (Supertab f) `mappend` (Supertab g) =
    Supertab $ \s -> f s `mplus` g s
-- | Bind Tab so that it inserts indentation at the start of a line or
-- after whitespace, and otherwise tries the expander, falling back to
-- word completion.
superTab :: (MonadInteract m Action Event) => Bool -> SupertabExt -> m ()
superTab caseSensitive (Supertab expander) =
    some (spec KTab ?>>! doSuperTab) >> deprioritize >>! resetComplete
  where
    doSuperTab = do canExpand <- withBuffer $ do
                                   sol <- atSol
                                   ws <- hasWhiteSpaceBefore
                                   return $ sol || ws
                    if canExpand
                      then insertTab
                      else runCompleter
    insertTab = withBuffer $ mapM_ savingInsertCharB =<< tabB
    runCompleter = do w <- withBuffer $ readPrevWordB
                      case expander w of
                        Just cmd -> withBuffer $ do bkillWordB >> cmd
                        _        -> autoComplete
    autoComplete = wordCompleteString' caseSensitive >>=
                   withBuffer . (bkillWordB >>) . insertN
-- | Convert snippet description list into a SuperTab extension
fromSnippets :: Bool -> [(String, SnippetCmd ())] -> SupertabExt
fromSnippets deleteLast snippets =
  Supertab $ \str -> lookup str $ map (second $ runSnippet deleteLast) snippets
-- Alias used in snippet definition files.
snippet = mkSnippetCmd
| codemac/yi-editor | src/Yi/Snippets.hs | gpl-2.0 | 12,852 | 0 | 21 | 3,905 | 3,853 | 1,906 | 1,947 | 297 | 4 |
module Network.Gitit2.Handler.Random (
getRandomR
) where
import Control.Monad (filterM)
import Data.FileStore (index)
import Network.Gitit2.Import
import System.Random (randomRIO)
-- | Redirect to a uniformly-random wiki page: index the filestore,
-- keep only page files that are not discussion pages, and pick one.
-- NOTE(review): if the wiki has no pages, @randomRIO (0, -1)@ and the
-- subsequent @!!@ will fail — consider handling the empty case.
getRandomR :: HasGitit master => GH master Html
getRandomR = do
  fs <- filestore <$> getYesod
  files <- liftIO $ index fs
  pages <- mapM pageForPath =<< filterM (fmap not . isDiscussPageFile)
                            =<< filterM isPageFile files
  pagenum <- liftIO $ randomRIO (0, length pages - 1)
  let thepage = pages !! pagenum
  redirect $ ViewR thepage
| thkoch2001/gitit2 | Network/Gitit2/Handler/Random.hs | gpl-2.0 | 553 | 0 | 13 | 119 | 182 | 92 | 90 | 15 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module PeerThreadMock
( peerThreadMain
) where
import Prelude hiding (readFile)
-- import Test.Tasty.HUnit (testCase, (@?=))
--
-- import FuncTorrent.ControlThread
import Control.Concurrent
import Control.Lens
import System.Timeout
import Data.IORef
import System.IO
import FuncTorrent.PeerThreadData
-- | Mock peer-thread loop for tests: repeatedly take the next
-- commanded action from the thread's action MVar, simulate it (network
-- actions are faked with a 1 s delay), publish the resulting status,
-- and loop forever.
peerThreadMain :: PeerThread -> IO ()
peerThreadMain pt = do
  toDoAction <- getAction
  case toDoAction of
    InitPeerConnection -> do
      threadDelay $ 1000*1000  -- pretend connecting takes a second
      setStatus InitDone
    GetPeerStatus -> do
      threadDelay $ 1000*1000
      setStatus PeerReady
    GetPieces piece ->         -- NOTE(review): requested piece is ignored by the mock
      setStatus Downloading
    Seed ->
      setStatus Seeding
    StayIdle ->
      setStatus PeerReady
  peerThreadMain pt
 where setStatus = putMVar (pt^.peerTStatus)
       getAction = takeMVar (pt^.peerTAction)
| dfordivam/functorrent | test/PeerThreadMock.hs | gpl-3.0 | 867 | 0 | 13 | 186 | 202 | 103 | 99 | 29 | 5 |
-- grid is a game written in Haskell
-- Copyright (C) 2018 [email protected]
--
-- This file is part of grid.
--
-- grid is free software: you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- grid is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with grid. If not, see <http://www.gnu.org/licenses/>.
--
module Game.LevelPuzzleMode.LevelPuzzleWorld.OutputState
(
#ifdef GRID_FANCY
module Game.LevelPuzzleMode.LevelPuzzleWorld.OutputState.Fancy,
#else
module Game.LevelPuzzleMode.LevelPuzzleWorld.OutputState.Plain,
#endif
) where
#ifdef GRID_FANCY
import Game.LevelPuzzleMode.LevelPuzzleWorld.OutputState.Fancy
#else
import Game.LevelPuzzleMode.LevelPuzzleWorld.OutputState.Plain
#endif
| karamellpelle/grid | designer/source/Game/LevelPuzzleMode/LevelPuzzleWorld/OutputState.hs | gpl-3.0 | 1,150 | 0 | 5 | 175 | 52 | 45 | 7 | 4 | 0 |
{-# LANGUAGE NoMonomorphismRestriction#-}
{-# LANGUAGE NoImplicitPrelude #-}
module Bamboo.Theme.MiniHTML5.Widget.Template (page) where
import Bamboo.Theme.MiniHTML5.Env hiding (navigation, sidebar)
import Bamboo.Helper.StateHelper hiding (uri)
import Bamboo.Type.State
import Web.HCheat
import Text.HTML.Moe.DSL.Kawaii
import Data.ByteString.Lazy.Char8 (fromChunks)
import Bamboo.Theme.MiniHTML5.Widget.Navigation
import Bamboo.Theme.MiniHTML5.Widget.Sidebar
import Bamboo.Theme.MiniHTML5.Widget.Helper
import qualified Bamboo.Type.Config as C
import qualified Bamboo.Type.State as S
import qualified Bamboo.Type.Theme as T
import Hack
import Text.HTML.Moe.Type (Attribute)
-- | Build a @charset@ attribute for the @meta@ tag.
charset :: String -> Attribute
charset = attr "charset"
-- | Semantic section label.
-- NOTE(review): this renders as a @class@ attribute, not an HTML
-- @role@ attribute — confirm that is intended.
role :: String -> Attribute
role = _class
-- | Render a full HTML5 page around the given content: head (title,
-- favicon, RSS link, theme css/js), banner header with optional search
-- box, navigation, content + sidebar, footer and optional
-- analytics/disqus hooks.
-- NOTE(review): the module uses NoImplicitPrelude; @-@ here is the
-- low-precedence application operator re-exported by the Bamboo
-- helpers, and @.@ is reverse application — confirm against Env.
template s x = html_response - fromChunks - return - render' - do
  raw doc_type
  html' - do
    head' - do
      let apply_if f g y = if f y then g y else y
          root = (s.env.slashed_script_name /)
          page_title = s.config.blog_title ++ s.S.resource_title.apply_if (null > not) (" / " ++)
      meta [charset "utf-8"]
      -- google chrome frame for ie
      meta [http_equiv "X-UA-Compatible", content "chrome=1"]
      title' - str - page_title
      link [rel "icon", _type "image/png", href - root - s.config.favicon]
      link [rel "alternate", _type "application/rss+xml", href - rss_url_link_pair s .fst, attr "title" "RSS 2.0"]
      s.config.theme_config.T.css.mapM_ (root > css)
      s.config.theme_config.T.js.mapM_(root > js)
      -- html5 enabler for ie
      prim "<!--[if IE]><script src=\"http://html5shiv.googlecode.com/svn/trunk/html5.js\"></script><![endif]-->"
    body' - do
      header [role "banner"] - do
        div [_class "title"] -
          str - s.config.blog_title
        div [_class "sub-title"] -
          str - s.config.blog_subtitle
        -- search box only when the Search extension is enabled
        when (has_extension Search) -
          form [action "/search", method "get", role "search"] -
            input [name "s", id "s", _type "text", value ""]
      navigation s
      div [id "page"] - do
        section [role "posts"] - x
        sidebar s
      footer' - do
        -- custom footer from config, else the default credits
        case s.config.C.footer of
          Just y -> y.markup.show_html
          Nothing -> do
            str - "©2009 " ++ s.config.blog_title
            br'
            str - "Powered by "
            a [href - s.config.bamboo_url] - str "Bamboo"
            str - " using "
            a [href "http://www.haskell.org/"] - str "Haskell"
      when (has_extension Analytics) -
        raw - analytics - s.config.analytics_account_id
      disqus_code
page = template
| nfjinjing/bamboo-theme-mini-html5 | src/Bamboo/Theme/MiniHTML5/Widget/Template.hs | gpl-3.0 | 2,711 | 5 | 27 | 672 | 817 | 422 | 395 | 64 | 3 |
module P14TaxCalc where
import Control.Monad (when) -- to meet constraint of having no "else" clause but note that
-- without mutation you have to faff some to make it work
import Library
-- | Prompt for a purchase amount and a state abbreviation; print the
-- pre-tax total, and for Wisconsin ("WI", case-insensitive via 'uc')
-- additionally print the tax and taxed total.
main :: IO ()
main = do
  amt <- promptNonNegFloat "Amount: "
  state <- promptS "State: "
  putStrLn $ "Total (exc tax): " ++ showD amt
  when (uc state == "WI") $ putStrLn $ wisconsinTax amt
-- | Render the tax line and the taxed-total line for a Wisconsin
-- purchase at the flat 5.5% state rate.
wisconsinTax :: Float -> String
wisconsinTax amt = taxLine ++ "\n" ++ totalLine
  where
    rate      = 0.055              -- WI flat sales-tax rate
    tax       = amt * rate
    taxLine   = "Tax: " ++ showD8 tax
    totalLine = "Total: " ++ showD8 (amt + tax)
-- TODO: Full state name lookup
| ciderpunx/57-exercises-for-programmers | src/P14TaxCalc.hs | gpl-3.0 | 641 | 0 | 12 | 205 | 153 | 77 | 76 | 15 | 1 |
{-# LANGUAGE ViewPatterns #-}
module Run.MutTest (runMutTest) where
import Prelude hiding (writeFile)
import Data.ByteString.Lazy hiding (putStrLn)
import Data.List hiding (length)
import Data.Maybe
import GHC.Int
import Control.Monad
import Control.DeepSeq
import System.Directory
import Test.QuickFuzz.Gen.FormatInfo
import Args
import Debug
import Exception
import Process
import Utils
import Utils.Decoding
import Utils.Mutation
import Test.QuickCheck.Random (mkQCGen)
import Test.QuickCheck.Gen
import System.Random (randomIO)
-- |Return a lazy list of steps to execute
-- | Return a lazy list of steps to execute: unbounded when the user
-- did not cap the number of tries, @[1..n]@ otherwise.
getSteps :: QFCommand -> [Int]
getSteps cmd = case maxTries cmd of
  Nothing -> [1 ..]
  Just n  -> [1 .. n]
-- Run test subcommand
-- | Mutation-testing driver: decode the seed corpus, then loop
-- (bounded by 'getSteps') mutating a value, feeding it to the target
-- command via file or stdin, and saving any input that makes the
-- target fail.
runMutTest :: (Show actions, Show base, NFData base) =>
    QFCommand -> FormatInfo base actions -> IO ()
runMutTest cmd fmt = do
    debug (show cmd)
    when (hasActions fmt)
        (putStrLn "Selected format supports actions based generation/shrinking!")
    createDirectoryIfMissing True (outDir cmd)
    values <- strictDecode cmd fmt
    mkName <- nameMaker cmd fmt
    (shcmd, testname) <- prepareCli cmd fmt
    -- remove the scratch test file on exit/interrupt, if one is used
    let cleanup = when (usesFile cmd) $ removeFile testname
    -- Generation-execution-report loop
    forM_ (getSteps cmd) $ \n -> handleSigInt cleanup $ do
        let size = sawSize cmd n
        -- Mutate one of the decoded corpus values; the result is fully
        -- evaluated so encoding failures surface here.
        (mutated, seed) <- strictMutate cmd fmt values size
        -- Execute the command using either a file or stdin.
        exitcode <- if usesFile cmd
            then writeFile testname mutated >> execute (verbose cmd) shcmd
            else executeFromStdin (verbose cmd) shcmd mutated
        -- Report and move failed test cases.
        when (hasFailed exitcode) $ do
            let failname = mkName n seed size
            mapM_ putStrLn [ "Test case number " ++ show n ++ " has failed. "
                           , "Moving to " ++ failname ]
            if usesFile cmd
                then renameFile testname failname
                else writeFile failname mutated
        -- Shrink if necessary
        {-
        when (hasFailed exitcode && shrinking cmd) $ do
            -- Execute a shrinking stategy acordingly to the -a/--actions flag
            (smallest, nshrinks, nfails) <- if hasActions fmt
                then runShrinkActions cmd fmt shcmd testname
                        (fromJust mbacts) (diff encoded mutated)
                else runShrinkByteString cmd shcmd testname mutated
            printShrinkingFinished
            -- Report the shrinking results
            let shrinkName = mkName n seed size ++ ".reduced"
            mapM_ putStrLn
                [ "Reduced from " ++ show (length mutated) ++ " bytes"
                    ++ " to " ++ show (length smallest) ++ " bytes"
                , "After executing " ++ show nshrinks ++ " shrinks with "
                    ++ show nfails ++ " failing shrinks. "
                , "Saving to " ++ shrinkName ]
            writeFile shrinkName smallest
        -}
        when (not $ verbose cmd) (printTestStep n)
    -- Clean up the mess
    cleanup
    when (not $ verbose cmd) printFinished
| CIFASIS/QuickFuzz | app/Run/MutTest.hs | gpl-3.0 | 3,440 | 0 | 19 | 1,080 | 605 | 313 | 292 | 51 | 3 |
module HEP.Automation.MadGraph.Dataset.Set20110315set12 where
import HEP.Automation.MadGraph.Model
import HEP.Automation.MadGraph.Machine
import HEP.Automation.MadGraph.UserCut
import HEP.Automation.MadGraph.Cluster
import HEP.Automation.MadGraph.SetupType
import HEP.Automation.MadGraph.Dataset.Common
-- | Local filesystem layout for the MadGraph run scripts and output.
my_ssetup :: ScriptSetup
my_ssetup = SS {
    scriptbase = "/nobackup/iankim/nfs/workspace/ttbar/mc_script/"
  , mg5base    = "/nobackup/iankim/montecarlo/MG_ME_V4.4.44/MadGraph5_v0_6_1/"
  , workbase   = "/nobackup/iankim/nfs/workspace/ttbar/mc/"
  }
-- | Analysis-level cuts applied by the user-cut stage
-- (missing ET, lepton/jet eta and ET thresholds).
ucut :: UserCut
ucut = UserCut {
    uc_metcut    = 15.0
  , uc_etacutlep = 1.2
  , uc_etcutlep  = 18.0
  , uc_etacutjet = 2.5
  , uc_etcutjet  = 15.0
  }
-- | MadGraph process card: t-tbar production with zero or one extra jet.
processTTBar0or1jet :: [Char]
processTTBar0or1jet =
  "\ngenerate P P > t t~ QED=99 @1 \nadd process P P > t t~ J QED=99 @2 \n"
-- | Process setup for the "Six" model, ttbar + 0/1 jet.
psetup_six_ttbar01j :: ProcessSetup
psetup_six_ttbar01j = PS {
    mversion = MadGraph5
  , model = Six
  , process = processTTBar0or1jet
  , processBrief = "ttbar01j"
  , workname   = "315Six1JBig"
  }
my_csetup :: ClusterSetup
my_csetup = CS { cluster = Parallel 10 }
-- | Parameter grid: single mass/coupling point (800 GeV, g = 2.4).
sixparamset :: [Param]
sixparamset = [ SixParam mass g
              | mass <- [800.0]
              , g    <- [2.4] ]
psetuplist :: [ProcessSetup]
psetuplist = [ psetup_six_ttbar01j ]
-- | Run-set indices (event-set numbering for repeated runs).
sets :: [Int]
sets = [14..26]
-- | One work item per (parameter point, run index): 100k events each,
-- MLM matching, user cuts applied, PGS detector simulation.
sixtasklist :: [WorkSetup]
sixtasklist = [ WS my_ssetup (psetup_six_ttbar01j)
                   (rsetupGen p MLM (UserCutDef ucut) RunPGS 100000 num)
                   my_csetup
              | p <- sixparamset , num <- sets ]
totaltasklist :: [WorkSetup]
totaltasklist = sixtasklist
| wavewave/madgraph-auto-dataset | src/HEP/Automation/MadGraph/Dataset/Set20110315set12.hs | gpl-3.0 | 1,694 | 0 | 10 | 409 | 356 | 219 | 137 | 46 | 1 |
{-
******************************************************************************
* I N V A D E R S *
* *
* Module: IdentityList *
* Purpose: Association list with automatic key assignment and *
* identity-preserving map and filter operations. *
* Author: Henrik Nilsson *
* *
* Copyright (c) Yale University, 2003 *
* *
******************************************************************************
-}
module Data.IdentityList (
ILKey, -- Identity-list key type
IL, -- Identity-list, abstract. Instance of functor.
emptyIL, -- :: IL a
insertIL_, -- :: a -> IL a -> IL a
insertIL, -- :: a -> IL a -> (ILKey, IL a)
listToIL, -- :: [a] -> IL a
keysIL, -- :: IL a -> [ILKey]
elemsIL, -- :: IL a -> [a]
assocsIL, -- :: IL a -> [(ILKey, a)]
deleteIL, -- :: ILKey -> IL a -> IL a
updateIL, -- :: ILKey -> a -> IL a -> IL a
updateILWith, -- :: ILKey -> (a -> a) -> IL a -> IL a
mapIL, -- :: ((ILKey, a) -> b) -> IL a -> IL b
filterIL, -- :: ((ILKey, a) -> Bool) -> IL a -> IL a
mapFilterIL, -- :: ((ILKey, a) -> Maybe b) -> IL a -> IL b
lookupIL, -- :: ILKey -> IL a -> Maybe a
findIL, -- :: ((ILKey, a) -> Bool) -> IL a -> Maybe a
mapFindIL, -- :: ((ILKey, a) -> Maybe b) -> IL a -> Maybe b
findAllIL, -- :: ((ILKey, a) -> Bool) -> IL a -> [a]
mapFindAllIL, -- :: ((ILKey, a) -> Maybe b) -> IL a -> [b]
ilSeq,
) where
import Data.List (find)
import Data.Foldable
------------------------------------------------------------------------------
-- Data type definitions
------------------------------------------------------------------------------
type ILKey = Int
-- Invariants:
-- * Sorted in descending key order. (We don't worry about
--   key wrap around).
-- * Keys are NOT reused
-- 'ilNextKey' is the key the next insertion will receive.
data IL a = IL { ilNextKey :: ILKey, ilAssocs :: [(ILKey, a)] }
-- Folds over the elements only, ignoring keys.
instance Foldable IL where
    foldMap f = foldMap f . map snd . ilAssocs
------------------------------------------------------------------------------
-- Class instances
------------------------------------------------------------------------------
-- Identity-preserving map: keys are kept, only elements change.
instance Functor IL where
    fmap f (IL {ilNextKey = nk, ilAssocs = kas}) =
        IL {ilNextKey = nk, ilAssocs = [ (i, f a) | (i, a) <- kas ]}
------------------------------------------------------------------------------
-- Constructors
------------------------------------------------------------------------------
-- | The empty identity list; keys start at 0.
emptyIL :: IL a
emptyIL = IL {ilNextKey = 0, ilAssocs = []}
-- | Insert, discarding the freshly assigned key.
insertIL_ :: a -> IL a -> IL a
insertIL_ a il = snd (insertIL a il)
-- | Insert an element under a fresh key; prepending keeps the
-- descending-key invariant.
insertIL :: a -> IL a -> (ILKey, IL a)
insertIL a (IL {ilNextKey = k, ilAssocs = kas}) = (k, il') where
    il' = IL {ilNextKey = k + 1, ilAssocs = (k, a) : kas}
-- | Build an identity list from a plain list, assigning keys 0..n-1.
listToIL :: [a] -> IL a
listToIL as = IL {ilNextKey = length as,
                  ilAssocs = reverse (zip [0..] as)} -- Maintain invariant!
------------------------------------------------------------------------------
-- Additional selectors
------------------------------------------------------------------------------
-- | All key/element associations, in descending key order.
assocsIL :: IL a -> [(ILKey, a)]
assocsIL = ilAssocs
-- | All keys, in descending order.
keysIL :: IL a -> [ILKey]
keysIL = map fst . ilAssocs
-- | All elements, newest (largest key) first.
elemsIL :: IL a -> [a]
elemsIL = map snd . ilAssocs
------------------------------------------------------------------------------
-- Mutators
------------------------------------------------------------------------------
-- | Delete the element stored under the given key; no-op when absent.
-- Relies on the descending-key invariant: once the scanned key drops
-- below the target (@k > k'@) the target cannot occur later, so the
-- rest of the list is returned unchanged.
deleteIL :: ILKey -> IL a -> IL a
deleteIL k (IL {ilNextKey = nk, ilAssocs = kas}) =
    IL {ilNextKey = nk, ilAssocs = deleteHlp kas}
    where
        deleteHlp [] = []
        deleteHlp kakas@(ka@(k', _) : kas) | k > k'  = kakas
                                           | k == k' = kas
                                           | otherwise = ka : deleteHlp kas
-- | Replace the element stored under the given key; the list is
-- returned unchanged when the key is absent.
updateIL :: ILKey -> a -> IL a -> IL a
updateIL k v = updateILWith k (const v)
-- | Apply a function to the element stored under the given key,
-- leaving every other association untouched.
updateILWith :: ILKey -> (a -> a) -> IL a -> IL a
updateILWith k f = mapIL pick
  where
    pick (k', v)
      | k' == k   = f v
      | otherwise = v
------------------------------------------------------------------------------
-- Filter and map operations
------------------------------------------------------------------------------
-- These are "identity-preserving", i.e. the key associated with an element
-- in the result is the same as the key of the element from which the
-- result element was derived.
-- | Identity-preserving map: the function sees each (key, element)
-- pair, and results keep their originating key.
mapIL :: ((ILKey, a) -> b) -> IL a -> IL b
mapIL f (IL {ilNextKey = nk, ilAssocs = kas}) =
    IL {ilNextKey = nk, ilAssocs = [(k, f ka) | ka@(k,_) <- kas]}
-- | Identity-preserving filter over (key, element) pairs.
filterIL :: ((ILKey, a) -> Bool) -> IL a -> IL a
filterIL p (IL {ilNextKey = nk, ilAssocs = kas}) =
    IL {ilNextKey = nk, ilAssocs = filter p kas}
-- | Combined map and filter: keep only the 'Just' results, each under
-- its original key.
mapFilterIL :: ((ILKey, a) -> Maybe b) -> IL a -> IL b
mapFilterIL p (IL {ilNextKey = nk, ilAssocs = kas}) =
    IL {
        ilNextKey = nk,
        ilAssocs = [(k, b) | ka@(k, _) <- kas, Just b <- [p ka]]
    }
------------------------------------------------------------------------------
-- Lookup operations
------------------------------------------------------------------------------
-- | Look up the element stored under the given key.
lookupIL :: ILKey -> IL a -> Maybe a
lookupIL k = lookup k . ilAssocs
-- | The first element (scanning in descending key order) whose
-- (key, element) pair satisfies the predicate.
findIL :: ((ILKey, a) -> Bool) -> IL a -> Maybe a
findIL p = fmap snd . find p . ilAssocs
-- | The first 'Just' produced by applying the function to the
-- (key, element) pairs; short-circuits via the lazy right fold.
mapFindIL :: ((ILKey, a) -> Maybe b) -> IL a -> Maybe b
mapFindIL p = foldr step Nothing . ilAssocs
  where
    step ka rest = maybe rest Just (p ka)
-- | Every element whose (key, element) pair satisfies the predicate.
findAllIL :: ((ILKey, a) -> Bool) -> IL a -> [a]
findAllIL p = map snd . filter p . ilAssocs
-- | Every 'Just' result of applying the function to the associations.
mapFindAllIL:: ((ILKey, a) -> Maybe b) -> IL a -> [b]
mapFindAllIL p = concatMap keep . ilAssocs
  where
    keep ka = maybe [] (: []) (p ka)
-- | Force the association list: evaluating 'mapSeq' to WHNF walks the
-- whole spine, forcing every cons cell and every (key, element) pair
-- to WHNF (elements themselves are not deeply forced).
ilSeq :: IL a -> IL a
ilSeq il = mapSeq (ilAssocs il) `seq` il
mapSeq :: [a] -> [a]
mapSeq x = x `seq` mapSeq' x
-- Mutually recursive with 'mapSeq' so each element is forced before
-- the remainder of the spine.
mapSeq' [] = []
mapSeq' (a:as) = a `seq` mapSeq as
| keera-studios/pang-a-lambda | Experiments/collisions/Data/IdentityList.hs | gpl-3.0 | 6,679 | 0 | 12 | 2,038 | 1,696 | 943 | 753 | 93 | 3 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.AccessApproval.Organizations.ApprovalRequests.Dismiss
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Dismisses a request. Returns the updated ApprovalRequest. NOTE: This
-- does not deny access to the resource if another request has been made
-- and approved. It is equivalent in effect to ignoring the request
-- altogether. Returns NOT_FOUND if the request does not exist. Returns
-- FAILED_PRECONDITION if the request exists but is not in a pending state.
--
-- /See:/ <https://cloud.google.com/access-approval/docs Access Approval API Reference> for @accessapproval.organizations.approvalRequests.dismiss@.
module Network.Google.Resource.AccessApproval.Organizations.ApprovalRequests.Dismiss
(
-- * REST Resource
OrganizationsApprovalRequestsDismissResource
-- * Creating a Request
, organizationsApprovalRequestsDismiss
, OrganizationsApprovalRequestsDismiss
-- * Request Lenses
, oardXgafv
, oardUploadProtocol
, oardAccessToken
, oardUploadType
, oardPayload
, oardName
, oardCallback
) where
import Network.Google.AccessApproval.Types
import Network.Google.Prelude
-- | A resource alias for @accessapproval.organizations.approvalRequests.dismiss@
-- method which the 'OrganizationsApprovalRequestsDismiss' request conforms to.
-- The @name@ segment captures the full resource path together with the
-- @:dismiss@ verb, hence 'CaptureMode' rather than a plain capture.
-- ('(:>)' is right-associative, so the flat chain below denotes the same
-- type as a fully nested spelling.)
type OrganizationsApprovalRequestsDismissResource =
  "v1"
    :> CaptureMode "name" "dismiss" Text
    :> QueryParam "$.xgafv" Xgafv
    :> QueryParam "upload_protocol" Text
    :> QueryParam "access_token" Text
    :> QueryParam "uploadType" Text
    :> QueryParam "callback" Text
    :> QueryParam "alt" AltJSON
    :> ReqBody '[JSON] DismissApprovalRequestMessage
    :> Post '[JSON] ApprovalRequest
-- | Dismisses a request. Returns the updated ApprovalRequest. NOTE: This
-- does not deny access to the resource if another request has been made
-- and approved. It is equivalent in effect to ignoring the request
-- altogether. Returns NOT_FOUND if the request does not exist. Returns
-- FAILED_PRECONDITION if the request exists but is not in a pending state.
--
-- /See:/ 'organizationsApprovalRequestsDismiss' smart constructor.
data OrganizationsApprovalRequestsDismiss =
  OrganizationsApprovalRequestsDismiss'
    { _oardXgafv          :: !(Maybe Xgafv) -- ^ V1 error format.
    , _oardUploadProtocol :: !(Maybe Text) -- ^ Upload protocol (\"raw\", \"multipart\").
    , _oardAccessToken    :: !(Maybe Text) -- ^ OAuth access token.
    , _oardUploadType     :: !(Maybe Text) -- ^ Legacy upload protocol.
    , _oardPayload        :: !DismissApprovalRequestMessage -- ^ Request body.
    , _oardName           :: !Text -- ^ Name of the ApprovalRequest to dismiss.
    , _oardCallback       :: !(Maybe Text) -- ^ JSONP callback.
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'OrganizationsApprovalRequestsDismiss' with the
-- minimum fields required to make a request.  Every optional field starts
-- out as 'Nothing'; adjust them through the lenses 'oardXgafv',
-- 'oardUploadProtocol', 'oardAccessToken', 'oardUploadType',
-- 'oardPayload', 'oardName' and 'oardCallback'.
organizationsApprovalRequestsDismiss
    :: DismissApprovalRequestMessage -- ^ 'oardPayload'
    -> Text -- ^ 'oardName'
    -> OrganizationsApprovalRequestsDismiss
organizationsApprovalRequestsDismiss payload name =
  OrganizationsApprovalRequestsDismiss'
    { _oardXgafv = Nothing
    , _oardUploadProtocol = Nothing
    , _oardAccessToken = Nothing
    , _oardUploadType = Nothing
    , _oardPayload = payload
    , _oardName = name
    , _oardCallback = Nothing
    }
-- | V1 error format.
oardXgafv :: Lens' OrganizationsApprovalRequestsDismiss (Maybe Xgafv)
oardXgafv = lens _oardXgafv (\record value -> record {_oardXgafv = value})

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
oardUploadProtocol :: Lens' OrganizationsApprovalRequestsDismiss (Maybe Text)
oardUploadProtocol =
  lens _oardUploadProtocol
    (\record value -> record {_oardUploadProtocol = value})

-- | OAuth access token.
oardAccessToken :: Lens' OrganizationsApprovalRequestsDismiss (Maybe Text)
oardAccessToken =
  lens _oardAccessToken (\record value -> record {_oardAccessToken = value})

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
oardUploadType :: Lens' OrganizationsApprovalRequestsDismiss (Maybe Text)
oardUploadType =
  lens _oardUploadType (\record value -> record {_oardUploadType = value})

-- | Multipart request metadata.
oardPayload :: Lens' OrganizationsApprovalRequestsDismiss DismissApprovalRequestMessage
oardPayload = lens _oardPayload (\record value -> record {_oardPayload = value})

-- | Name of the ApprovalRequest to dismiss.
oardName :: Lens' OrganizationsApprovalRequestsDismiss Text
oardName = lens _oardName (\record value -> record {_oardName = value})

-- | JSONP
oardCallback :: Lens' OrganizationsApprovalRequestsDismiss (Maybe Text)
oardCallback = lens _oardCallback (\record value -> record {_oardCallback = value})
-- | Wires the request record to the REST resource above: 'Rs' is the
-- response type, 'Scopes' the required OAuth scope, and 'requestClient'
-- feeds the record fields to the generated client in positional order
-- (matching 'OrganizationsApprovalRequestsDismissResource').
instance GoogleRequest OrganizationsApprovalRequestsDismiss where
  type Rs OrganizationsApprovalRequestsDismiss = ApprovalRequest
  type Scopes OrganizationsApprovalRequestsDismiss =
    '["https://www.googleapis.com/auth/cloud-platform"]
  requestClient OrganizationsApprovalRequestsDismiss'{..} =
    go
      _oardName
      _oardXgafv
      _oardUploadProtocol
      _oardAccessToken
      _oardUploadType
      _oardCallback
      (Just AltJSON)
      _oardPayload
      accessApprovalService
    where
      go =
        buildClient
          (Proxy :: Proxy OrganizationsApprovalRequestsDismissResource)
          mempty
| brendanhay/gogol | gogol-accessapproval/gen/Network/Google/Resource/AccessApproval/Organizations/ApprovalRequests/Dismiss.hs | mpl-2.0 | 6,293 | 0 | 16 | 1,324 | 785 | 461 | 324 | 117 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Search.CSE.Siterestrict.List
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Returns metadata about the search performed, metadata about the engine
-- used for the search, and the search results. Uses a small set of url
-- patterns.
--
-- /See:/ <https://developers.google.com/custom-search/v1/introduction Custom Search API Reference> for @search.cse.siterestrict.list@.
module Network.Google.Resource.Search.CSE.Siterestrict.List
(
-- * REST Resource
CSESiterestrictListResource
-- * Creating a Request
, cSESiterestrictList
, CSESiterestrictList
-- * Request Lenses
, cseslImgDominantColor
, cseslXgafv
, cseslUploadProtocol
, cseslSiteSearchFilter
, cseslC2coff
, cseslOrTerms
, cseslAccessToken
, cseslStart
, cseslRights
, cseslUploadType
, cseslExcludeTerms
, cseslNum
, cseslFileType
, cseslSearchType
, cseslLr
, cseslQ
, cseslGooglehost
, cseslRelatedSite
, cseslHl
, cseslSort
, cseslSiteSearch
, cseslFilter
, cseslDateRestrict
, cseslLinkSite
, cseslLowRange
, cseslImgType
, cseslGl
, cseslCx
, cseslImgColorType
, cseslImgSize
, cseslExactTerms
, cseslCr
, cseslSafe
, cseslHq
, cseslCallback
, cseslHighRange
) where
import Network.Google.CustomSearch.Types
import Network.Google.Prelude
-- | A resource alias for @search.cse.siterestrict.list@ method which the
-- 'CSESiterestrictList' request conforms to.  ('(:>)' is right-associative,
-- so the flat chain below denotes exactly the same type as the nested
-- spelling; the query parameters appear in the order the generated client
-- expects them, which must match 'requestClient' below.)
type CSESiterestrictListResource =
  "customsearch"
    :> "v1"
    :> "siterestrict"
    :> QueryParam "imgDominantColor" CSESiterestrictListImgDominantColor
    :> QueryParam "$.xgafv" Xgafv
    :> QueryParam "upload_protocol" Text
    :> QueryParam "siteSearchFilter" CSESiterestrictListSiteSearchFilter
    :> QueryParam "c2coff" Text
    :> QueryParam "orTerms" Text
    :> QueryParam "access_token" Text
    :> QueryParam "start" (Textual Word32)
    :> QueryParam "rights" Text
    :> QueryParam "uploadType" Text
    :> QueryParam "excludeTerms" Text
    :> QueryParam "num" (Textual Int32)
    :> QueryParam "fileType" Text
    :> QueryParam "searchType" CSESiterestrictListSearchType
    :> QueryParam "lr" Text
    :> QueryParam "q" Text
    :> QueryParam "googlehost" Text
    :> QueryParam "relatedSite" Text
    :> QueryParam "hl" Text
    :> QueryParam "sort" Text
    :> QueryParam "siteSearch" Text
    :> QueryParam "filter" Text
    :> QueryParam "dateRestrict" Text
    :> QueryParam "linkSite" Text
    :> QueryParam "lowRange" Text
    :> QueryParam "imgType" CSESiterestrictListImgType
    :> QueryParam "gl" Text
    :> QueryParam "cx" Text
    :> QueryParam "imgColorType" CSESiterestrictListImgColorType
    :> QueryParam "imgSize" CSESiterestrictListImgSize
    :> QueryParam "exactTerms" Text
    :> QueryParam "cr" Text
    :> QueryParam "safe" CSESiterestrictListSafe
    :> QueryParam "hq" Text
    :> QueryParam "callback" Text
    :> QueryParam "highRange" Text
    :> QueryParam "alt" AltJSON
    :> Get '[JSON] Search
-- | Returns metadata about the search performed, metadata about the engine
-- used for the search, and the search results. Uses a small set of url
-- patterns.
--
-- /See:/ 'cSESiterestrictList' smart constructor.
--
-- NOTE: machine-generated code (gogol).  Per-field documentation lives on
-- the corresponding lenses below (e.g. '_cseslQ' is documented at
-- 'cseslQ'); all fields except strict requirements are optional.
data CSESiterestrictList =
  CSESiterestrictList'
    { _cseslImgDominantColor :: !(Maybe CSESiterestrictListImgDominantColor)
    , _cseslXgafv :: !(Maybe Xgafv)
    , _cseslUploadProtocol :: !(Maybe Text)
    , _cseslSiteSearchFilter :: !(Maybe CSESiterestrictListSiteSearchFilter)
    , _cseslC2coff :: !(Maybe Text)
    , _cseslOrTerms :: !(Maybe Text)
    , _cseslAccessToken :: !(Maybe Text)
    , _cseslStart :: !(Maybe (Textual Word32))
    , _cseslRights :: !(Maybe Text)
    , _cseslUploadType :: !(Maybe Text)
    , _cseslExcludeTerms :: !(Maybe Text)
    , _cseslNum :: !(Maybe (Textual Int32))
    , _cseslFileType :: !(Maybe Text)
    , _cseslSearchType :: !(Maybe CSESiterestrictListSearchType)
    , _cseslLr :: !(Maybe Text)
    , _cseslQ :: !(Maybe Text)
    , _cseslGooglehost :: !(Maybe Text)
    , _cseslRelatedSite :: !(Maybe Text)
    , _cseslHl :: !(Maybe Text)
    , _cseslSort :: !(Maybe Text)
    , _cseslSiteSearch :: !(Maybe Text)
    , _cseslFilter :: !(Maybe Text)
    , _cseslDateRestrict :: !(Maybe Text)
    , _cseslLinkSite :: !(Maybe Text)
    , _cseslLowRange :: !(Maybe Text)
    , _cseslImgType :: !(Maybe CSESiterestrictListImgType)
    , _cseslGl :: !(Maybe Text)
    , _cseslCx :: !(Maybe Text)
    , _cseslImgColorType :: !(Maybe CSESiterestrictListImgColorType)
    , _cseslImgSize :: !(Maybe CSESiterestrictListImgSize)
    , _cseslExactTerms :: !(Maybe Text)
    , _cseslCr :: !(Maybe Text)
    , _cseslSafe :: !(Maybe CSESiterestrictListSafe)
    , _cseslHq :: !(Maybe Text)
    , _cseslCallback :: !(Maybe Text)
    , _cseslHighRange :: !(Maybe Text)
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'CSESiterestrictList' with the minimum fields
-- required to make a request: every parameter starts out as 'Nothing'.
-- Set the parameters you need through the correspondingly named lenses —
-- one lens per request field, e.g. 'cseslQ' (query), 'cseslCx' (engine
-- id), 'cseslNum', 'cseslStart', 'cseslSafe', 'cseslSearchType',
-- 'cseslImgType', 'cseslLr', 'cseslGl', 'cseslSort', 'cseslFilter',
-- 'cseslSiteSearch' \/ 'cseslSiteSearchFilter', and so on.
cSESiterestrictList
    :: CSESiterestrictList
cSESiterestrictList =
  CSESiterestrictList'
    { _cseslImgDominantColor = Nothing
    , _cseslXgafv = Nothing
    , _cseslUploadProtocol = Nothing
    , _cseslSiteSearchFilter = Nothing
    , _cseslC2coff = Nothing
    , _cseslOrTerms = Nothing
    , _cseslAccessToken = Nothing
    , _cseslStart = Nothing
    , _cseslRights = Nothing
    , _cseslUploadType = Nothing
    , _cseslExcludeTerms = Nothing
    , _cseslNum = Nothing
    , _cseslFileType = Nothing
    , _cseslSearchType = Nothing
    , _cseslLr = Nothing
    , _cseslQ = Nothing
    , _cseslGooglehost = Nothing
    , _cseslRelatedSite = Nothing
    , _cseslHl = Nothing
    , _cseslSort = Nothing
    , _cseslSiteSearch = Nothing
    , _cseslFilter = Nothing
    , _cseslDateRestrict = Nothing
    , _cseslLinkSite = Nothing
    , _cseslLowRange = Nothing
    , _cseslImgType = Nothing
    , _cseslGl = Nothing
    , _cseslCx = Nothing
    , _cseslImgColorType = Nothing
    , _cseslImgSize = Nothing
    , _cseslExactTerms = Nothing
    , _cseslCr = Nothing
    , _cseslSafe = Nothing
    , _cseslHq = Nothing
    , _cseslCallback = Nothing
    , _cseslHighRange = Nothing
    }
-- | Returns images of a specific dominant color.  Acceptable values are
-- \`\"black\"\`, \`\"blue\"\`, \`\"brown\"\`, \`\"gray\"\`, \`\"green\"\`,
-- \`\"orange\"\`, \`\"pink\"\`, \`\"purple\"\`, \`\"red\"\`, \`\"teal\"\`,
-- \`\"white\"\` and \`\"yellow\"\`.
cseslImgDominantColor :: Lens' CSESiterestrictList (Maybe CSESiterestrictListImgDominantColor)
cseslImgDominantColor =
  lens _cseslImgDominantColor
    (\record value -> record {_cseslImgDominantColor = value})

-- | V1 error format.
cseslXgafv :: Lens' CSESiterestrictList (Maybe Xgafv)
cseslXgafv = lens _cseslXgafv (\record value -> record {_cseslXgafv = value})

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
cseslUploadProtocol :: Lens' CSESiterestrictList (Maybe Text)
cseslUploadProtocol =
  lens _cseslUploadProtocol
    (\record value -> record {_cseslUploadProtocol = value})

-- | Controls whether to include or exclude results from the site named in
-- the \`siteSearch\` parameter: \`\"e\"\` excludes, \`\"i\"\` includes.
cseslSiteSearchFilter :: Lens' CSESiterestrictList (Maybe CSESiterestrictListSiteSearchFilter)
cseslSiteSearchFilter =
  lens _cseslSiteSearchFilter
    (\record value -> record {_cseslSiteSearchFilter = value})

-- | Enables or disables Simplified and Traditional Chinese Search;
-- \`1\` disables the feature, \`0\` (the default) enables it.
cseslC2coff :: Lens' CSESiterestrictList (Maybe Text)
cseslC2coff = lens _cseslC2coff (\record value -> record {_cseslC2coff = value})

-- | Additional search terms; each result document must contain at least
-- one of them.
cseslOrTerms :: Lens' CSESiterestrictList (Maybe Text)
cseslOrTerms = lens _cseslOrTerms (\record value -> record {_cseslOrTerms = value})

-- | OAuth access token.
cseslAccessToken :: Lens' CSESiterestrictList (Maybe Text)
cseslAccessToken =
  lens _cseslAccessToken (\record value -> record {_cseslAccessToken = value})

-- | The index of the first result to return (the default page size is 10,
-- so \`&start=11\` begins the second page).  The JSON API never returns
-- more than 100 results, so \`start + num\` must not exceed 100.
cseslStart :: Lens' CSESiterestrictList (Maybe Word32)
cseslStart =
  lens _cseslStart (\record value -> record {_cseslStart = value}) .
    mapping _Coerce

-- | Filters based on licensing: \`cc_publicdomain\`, \`cc_attribute\`,
-- \`cc_sharealike\`, \`cc_noncommercial\`, \`cc_nonderived\` and
-- combinations of these.
cseslRights :: Lens' CSESiterestrictList (Maybe Text)
cseslRights = lens _cseslRights (\record value -> record {_cseslRights = value})

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
cseslUploadType :: Lens' CSESiterestrictList (Maybe Text)
cseslUploadType =
  lens _cseslUploadType (\record value -> record {_cseslUploadType = value})

-- | A word or phrase that must not appear in any result document.
cseslExcludeTerms :: Lens' CSESiterestrictList (Maybe Text)
cseslExcludeTerms =
  lens _cseslExcludeTerms (\record value -> record {_cseslExcludeTerms = value})

-- | Number of search results to return; valid values are 1 to 10
-- inclusive.
cseslNum :: Lens' CSESiterestrictList (Maybe Int32)
cseslNum =
  lens _cseslNum (\record value -> record {_cseslNum = value}) .
    mapping _Coerce
-- | Restricts results to files of a specified extension; see the Search
-- Console Help Center for the file types Google can index.
cseslFileType :: Lens' CSESiterestrictList (Maybe Text)
cseslFileType =
  lens _cseslFileType (\record value -> record {_cseslFileType = value})

-- | Specifies the search type: \`\"image\"\` switches to custom image
-- search; when unspecified, results are limited to webpages.
cseslSearchType :: Lens' CSESiterestrictList (Maybe CSESiterestrictListSearchType)
cseslSearchType =
  lens _cseslSearchType (\record value -> record {_cseslSearchType = value})

-- | Restricts the search to documents written in a particular language
-- (e.g., \`lr=lang_ja\`).  Accepts \`lang_xx\` codes such as \`lang_en\`,
-- \`lang_de\`, \`lang_ja\`, \`lang_zh-CN\`, \`lang_zh-TW\`; see the
-- Custom Search API reference for the full list of supported languages.
cseslLr :: Lens' CSESiterestrictList (Maybe Text)
cseslLr = lens _cseslLr (\record value -> record {_cseslLr = value})

-- | Query
cseslQ :: Lens' CSESiterestrictList (Maybe Text)
cseslQ = lens _cseslQ (\record value -> record {_cseslQ = value})

-- | **Deprecated**. Use the \`gl\` parameter for a similar effect.  The
-- local Google domain (for example google.com, google.de, or google.fr)
-- used to perform the search.
cseslGooglehost :: Lens' CSESiterestrictList (Maybe Text)
cseslGooglehost =
  lens _cseslGooglehost (\record value -> record {_cseslGooglehost = value})

-- | All search results should be pages related to the specified URL.
cseslRelatedSite :: Lens' CSESiterestrictList (Maybe Text)
cseslRelatedSite =
  lens _cseslRelatedSite (\record value -> record {_cseslRelatedSite = value})

-- | Sets the user interface language; explicitly setting it improves the
-- performance and quality of results.  See the Interface Languages
-- section of the Custom Search XML results documentation for the list of
-- supported values.
cseslHl :: Lens' CSESiterestrictList (Maybe Text)
cseslHl = lens _cseslHl (\record value -> record {_cseslHl = value})

-- | The sort expression to apply to the results, e.g. \`sort=date\`.
cseslSort :: Lens' CSESiterestrictList (Maybe Text)
cseslSort = lens _cseslSort (\record value -> record {_cseslSort = value})

-- | A site which should always be included in or excluded from results
-- (see the \`siteSearchFilter\` parameter).
cseslSiteSearch :: Lens' CSESiterestrictList (Maybe Text)
cseslSiteSearch =
  lens _cseslSiteSearch (\record value -> record {_cseslSiteSearch = value})
-- | Controls the duplicate content filter: \`0\` turns it off, \`1\`
-- turns it on.  Google filters all results by default; host-crowding
-- filtering applies only to multi-site searches.
cseslFilter :: Lens' CSESiterestrictList (Maybe Text)
cseslFilter = lens _cseslFilter (\record value -> record {_cseslFilter = value})

-- | Restricts results to URLs based on date: \`d[number]\`, \`w[number]\`,
-- \`m[number]\` or \`y[number]\` for the given number of past
-- days\/weeks\/months\/years.
cseslDateRestrict :: Lens' CSESiterestrictList (Maybe Text)
cseslDateRestrict =
  lens _cseslDateRestrict (\record value -> record {_cseslDateRestrict = value})

-- | All search results should contain a link to this URL.
cseslLinkSite :: Lens' CSESiterestrictList (Maybe Text)
cseslLinkSite =
  lens _cseslLinkSite (\record value -> record {_cseslLinkSite = value})

-- | Starting value for a search range; use \`lowRange\` and \`highRange\`
-- to append an inclusive range \`lowRange...highRange\` to the query.
cseslLowRange :: Lens' CSESiterestrictList (Maybe Text)
cseslLowRange =
  lens _cseslLowRange (\record value -> record {_cseslLowRange = value})

-- | Returns images of a type: \`\"clipart\"\`, \`\"face\"\`,
-- \`\"lineart\"\`, \`\"stock\"\`, \`\"photo\"\` or \`\"animated\"\`.
cseslImgType :: Lens' CSESiterestrictList (Maybe CSESiterestrictListImgType)
cseslImgType =
  lens _cseslImgType (\record value -> record {_cseslImgType = value})

-- | Geolocation of the end user as a two-letter country code; boosts
-- results whose country of origin matches the parameter value.  See the
-- Country Codes appendix for valid values.
cseslGl :: Lens' CSESiterestrictList (Maybe Text)
cseslGl = lens _cseslGl (\record value -> record {_cseslGl = value})

-- | The Programmable Search Engine ID to use for this request.
cseslCx :: Lens' CSESiterestrictList (Maybe Text)
cseslCx = lens _cseslCx (\record value -> record {_cseslCx = value})
-- | Returns black and white (\`\"mono\"\`), grayscale (\`\"gray\"\`),
-- transparent-background (\`\"trans\"\`) or color images.
cseslImgColorType :: Lens' CSESiterestrictList (Maybe CSESiterestrictListImgColorType)
cseslImgColorType =
  lens _cseslImgColorType (\record value -> record {_cseslImgColorType = value})

-- | Returns images of a specified size: \`\"huge\"\`, \`\"icon\"\`,
-- \`\"large\"\`, \`\"medium\"\`, \`\"small\"\`, \`\"xlarge\"\` or
-- \`\"xxlarge\"\`.
cseslImgSize :: Lens' CSESiterestrictList (Maybe CSESiterestrictListImgSize)
cseslImgSize =
  lens _cseslImgSize (\record value -> record {_cseslImgSize = value})

-- | A phrase that all documents in the search results must contain.
cseslExactTerms :: Lens' CSESiterestrictList (Maybe Text)
cseslExactTerms =
  lens _cseslExactTerms (\record value -> record {_cseslExactTerms = value})

-- | Restricts results to documents originating in a particular country
-- (boolean operators allowed); Google determines a document's country
-- from its top-level domain and the geographic location of its server.
cseslCr :: Lens' CSESiterestrictList (Maybe Text)
cseslCr = lens _cseslCr (\record value -> record {_cseslCr = value})

-- | Search safety level: \`\"active\"\` enables SafeSearch filtering,
-- \`\"off\"\` (the default) disables it.
cseslSafe :: Lens' CSESiterestrictList (Maybe CSESiterestrictListSafe)
cseslSafe = lens _cseslSafe (\record value -> record {_cseslSafe = value})

-- | Query terms appended to the query as if combined with a logical AND.
cseslHq :: Lens' CSESiterestrictList (Maybe Text)
cseslHq = lens _cseslHq (\record value -> record {_cseslHq = value})

-- | JSONP
cseslCallback :: Lens' CSESiterestrictList (Maybe Text)
cseslCallback =
  lens _cseslCallback (\record value -> record {_cseslCallback = value})

-- | Ending value for a search range; use \`lowRange\` and \`highRange\`
-- to append an inclusive range \`lowRange...highRange\` to the query.
cseslHighRange :: Lens' CSESiterestrictList (Maybe Text)
cseslHighRange =
  lens _cseslHighRange (\record value -> record {_cseslHighRange = value})
-- | Wires 'CSESiterestrictList' to its REST resource: 'Rs' is the
-- response type, no OAuth scopes are required, and 'requestClient'
-- passes every field to the generated client in positional order —
-- the order must match 'CSESiterestrictListResource' exactly.
instance GoogleRequest CSESiterestrictList where
  type Rs CSESiterestrictList = Search
  type Scopes CSESiterestrictList = '[]
  requestClient CSESiterestrictList'{..} =
    go
      _cseslImgDominantColor
      _cseslXgafv
      _cseslUploadProtocol
      _cseslSiteSearchFilter
      _cseslC2coff
      _cseslOrTerms
      _cseslAccessToken
      _cseslStart
      _cseslRights
      _cseslUploadType
      _cseslExcludeTerms
      _cseslNum
      _cseslFileType
      _cseslSearchType
      _cseslLr
      _cseslQ
      _cseslGooglehost
      _cseslRelatedSite
      _cseslHl
      _cseslSort
      _cseslSiteSearch
      _cseslFilter
      _cseslDateRestrict
      _cseslLinkSite
      _cseslLowRange
      _cseslImgType
      _cseslGl
      _cseslCx
      _cseslImgColorType
      _cseslImgSize
      _cseslExactTerms
      _cseslCr
      _cseslSafe
      _cseslHq
      _cseslCallback
      _cseslHighRange
      (Just AltJSON)
      customSearchService
    where
      go = buildClient (Proxy :: Proxy CSESiterestrictListResource) mempty
| brendanhay/gogol | gogol-customsearch/gen/Network/Google/Resource/Search/CSE/Siterestrict/List.hs | mpl-2.0 | 27,473 | 0 | 47 | 9,287 | 3,247 | 1,882 | 1,365 | 463 | 1 |
module Codewars.Kata.Convert where
-- | Break a number into its digits, least significant first, e.g.
-- @digitize 1234 == [4,3,2,1]@.  Anything below 10 — including zero and
-- negative numbers — is returned as a one-element list unchanged.
digitize :: Int -> [Int]
digitize n
  | n < 10 = [n]
  | otherwise = r : digitize q
  where
    (q, r) = n `divMod` 10
--
| ice1000/OI-codes | codewars/101-200/convert-number-to-reversed-array-of-digits.hs | agpl-3.0 | 145 | 0 | 9 | 39 | 70 | 36 | 34 | 4 | 1 |
module Data.GI.CodeGen.Util
( prime
, parenthesize
, padTo
, withComment
, ucFirst
, lcFirst
, modifyQualified
, tshow
, terror
, utf8ReadFile
, utf8WriteFile
, splitOn
) where
import Data.Monoid ((<>))
import Data.Char (toLower, toUpper)
import qualified Data.ByteString as B
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
-- | Pad the text on the right with spaces up to the given width.  Text
-- that is already @n@ characters or longer is returned unchanged, since
-- 'T.replicate' treats a negative count as zero.
padTo :: Int -> Text -> Text
padTo n text = text <> T.replicate missing (T.singleton ' ')
  where
    missing = n - T.length text
-- | Right-pad the first text to column 40 and append the second as an
-- end-of-line Haskell comment.
withComment :: Text -> Text -> Text
withComment code note = padTo 40 code <> T.pack "-- " <> note
-- | Append a prime character (@'@) to the given text.
prime :: Text -> Text
prime t = T.snoc t '\''
-- | Wrap the given text in a pair of parentheses.
parenthesize :: Text -> Text
parenthesize s = T.concat [T.singleton '(', s, T.singleton ')']
-- | Construct the `Text` representation of a showable, i.e. 'show'
-- followed by packing into 'Text'.
tshow :: Show a => a -> Text
tshow x = T.pack (show x)
-- | Abort with the given `Text` as the error message: 'error' for 'Text'.
-- Never returns.
terror :: Text -> a
terror msg = error (T.unpack msg)
-- | Capitalize the first character of the given string; the empty string
-- is returned unchanged and only the first character is affected.
ucFirst :: Text -> Text
ucFirst t
  | T.null t = t
  | otherwise = T.cons (toUpper (T.head t)) (T.tail t)
-- | Make the first character of the given string lowercase; the empty
-- string is returned unchanged and only the first character is affected.
lcFirst :: Text -> Text
lcFirst t
  | T.null t = t
  | otherwise = T.cons (toLower (T.head t)) (T.tail t)
-- | Apply the given modification function to the given symbol.  If the
-- symbol is qualified (e.g. @A.B.c@) the modification only applies to the
-- last component; the qualifier components are left untouched.
modifyQualified :: (Text -> Text) -> Text -> Text
modifyQualified f = T.intercalate dot . onLast . T.splitOn dot
  where
    dot = T.singleton '.'
    onLast [] = []
    onLast [x] = [f x]
    onLast (x : xs) = x : onLast xs
-- | Split a list into sublists delimited by the given element.  The
-- delimiter itself is dropped; adjacent delimiters produce empty chunks,
-- and the result always contains at least one (possibly empty) list.
splitOn :: Eq a => a -> [a] -> [[a]]
splitOn sep = chunk []
  where
    chunk acc [] = [reverse acc]
    chunk acc (y : ys)
      | y == sep = reverse acc : chunk [] ys
      | otherwise = chunk (y : acc) ys
-- | Read a file assuming it is UTF-8 encoded.  The file is read strictly
-- as bytes and decoded afterwards; if decoding fails this calls 'terror'
-- (i.e. 'error') with a message naming the offending file.
utf8ReadFile :: FilePath -> IO T.Text
utf8ReadFile fname = do
  bytes <- B.readFile fname
  case TE.decodeUtf8' bytes of
    Right text -> return text
    Left decodeErr ->
      terror (T.concat [ T.pack "Input file ", tshow fname
                       , T.pack " seems not to be valid UTF-8. Error was:\n"
                       , tshow decodeErr
                       ])
-- | Write the given `Text` into an UTF-8 encoded file: encode first,
-- then write the bytes strictly.
utf8WriteFile :: FilePath -> T.Text -> IO ()
utf8WriteFile path contents = B.writeFile path (TE.encodeUtf8 contents)
| ford-prefect/haskell-gi | lib/Data/GI/CodeGen/Util.hs | lgpl-2.1 | 2,461 | 0 | 15 | 648 | 773 | 413 | 360 | 59 | 3 |
module Main where
import GLua.Lexer
import GLua.TokenTypes
import GLua.Parser
import GLua.AG.PrettyPrint
import Data.Char
import System.FilePath
import System.Environment
import System.IO
import System.Exit
import Control.Monad
-- | Usage text printed when the program is invoked with fewer than two
-- arguments.  NOTE(review): no type signature in the original; it is a
-- plain 'String' produced by 'unlines'.
help = unlines ["",
    "Usage: GLuaParser <ACTION> <FILE>",
    "",
    "Possible actions:",
    "   fix - attempt to fix syntax errors in the Lua script"
    ]
-- | Entry point: @GLuaParser \<ACTION\> \<FILE\>@.
--
-- Lexes the given file.  When lexing reports errors, either rewrites the
-- file from the recovered token stream (action @fix@) or exits with
-- failure.  When lexing is clean, parses and pretty-prints the file,
-- reporting any parse errors first.
main :: IO ()
main = do
    args <- getArgs
    -- Argument checking
    when (length args < 2) $ do
        putStrLn help
        exitSuccess
    let action = head args  -- safe: length checked above
    let file = args !! 1
    contents <- readFile file
    -- Force the whole file before going on: 'readFile' is lazy, and the
    -- "fix" branch below writes back to the very same path.  Without the
    -- forcing, 'writeFile' could run while the read handle is still open
    -- (an "openFile: resource busy" error) or truncate unread input.
    _ <- return $! length contents
    -- Lex the file ('lexed' rather than 'lex' to avoid shadowing Prelude.lex)
    let lexed = execParseTokens contents
    let tokens = fst lexed
    let errors = snd lexed
    -- Print any lexing errors
    unless (null errors) $ do
        mapM_ print errors
        -- Attempt to fix errors when asked
        when (map toLower action == "fix") $ do
            writeFile file . concatMap show $ tokens
            putStrLn "Success"
            exitSuccess
        exitWith (ExitFailure 1)
    let ast = parseGLua tokens
    putStrLn "Errors:"
    mapM_ (putStrLn . renderError) . snd $ ast
    putStrLn "Pretty printed code:"
    putStrLn . prettyprint . fst $ ast
    exitSuccess
| FPtje/LuaAnalysis | analysis/src/GLua/Main.hs | lgpl-2.1 | 1,231 | 0 | 15 | 354 | 341 | 162 | 179 | 40 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE ExistentialQuantification #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE KindSignatures #-}
{-# LANGUAGE MultiWayIf #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE StandaloneDeriving #-}
module HC2 where
import qualified Prelude
import Protolude
import Test.Hspec
-- | A unit value whose only information is the type-level natural @n@
-- it is indexed by; the payload lives purely in the type.
data N (n :: Nat) = N deriving (Eq, Show)
-- existential: hides the type-level index @n@ while retaining its
-- 'KnownNat' dictionary, so the number can still be recovered at runtime.
data SomeN = forall n . KnownNat n => SomeN (N n)
-- standalone deriving: GHC cannot derive 'Show' through the existential
-- with a plain deriving clause.
deriving instance Show SomeN
-- | Create a type-level natural from a runtime value, wrapped in the
-- 'SomeN' existential.  Panics when the input is negative, because
-- 'someNatVal' returns 'Nothing' for negative arguments.
-- (The original signature carried a spurious, unused @forall a .@ —
-- removed; the type is otherwise unchanged.)
someNVal :: Int -> SomeN
someNVal i =
  case someNatVal (fromIntegral i) of
    Nothing -> panic "negative length"
    Just (SomeNat (_ :: Proxy n)) -> SomeN (N :: N n)
-- | Unpack the existential and hand its value to a rank-2 continuation
-- that must work for every index @n@.
-- (The original signature carried a spurious, unused @a@ type variable —
-- removed; the type is otherwise unchanged.)
withN :: SomeN -> (forall n. KnownNat n => N n -> r) -> r
withN s f = case s of SomeN n -> f n
-- | Round trip: build @N 3@ from a runtime 'Int' via 'someNVal', then
-- recover the number from the type with 'natVal'.
h24 :: Spec
h24 = it "h24" $ withN (someNVal 3) f `shouldBe` 3
 where
  f :: forall n . KnownNat n => N n -> Int
  f _ = fromIntegral (natVal (Proxy :: Proxy n)) -- create term from type
-- hack: create type at runtime and call compile-time indexed type.
-- The runtime 'natVal' check is what justifies reconstructing the value
-- at the literal index (@N :: N 3@): 'f'' only accepts exactly @N 3@,
-- and any other index yields 'Nothing'.
h25 :: Spec
h25 = it "h25" $ withN (someNVal 3) f `shouldBe` Just "3"
 where
  f :: forall n . KnownNat n => N n -> Maybe Text
  f n = if natVal (Proxy :: Proxy n) == 3 then Just (f' (N :: N 3)) else Nothing
  f' :: N 3 -> Text
  f' _ = "3"
| haroldcarr/learn-haskell-coq-ml-etc | haskell/topic/type-level/2019-04-travis-whitaker-discovering-datakinds-at-runtime/src/HC2.hs | unlicense | 1,581 | 0 | 13 | 459 | 461 | 246 | 215 | 34 | 2 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
module DeployR( module DeployR.API
, module DeployR.Types
) where
-- this is an umbrella module
import DeployR.Types -- Data types for the API
import DeployR.API -- the API itself, including client functions
-- picked up from servant tutorial code. TODO reduce if possible
import Control.Monad.Trans.Except (ExceptT, runExceptT)
import Control.Monad.Trans.Except (catchE, throwE)
import Data.Aeson
import Data.Text(Text)
import qualified Data.Text as T
import qualified Data.Text.IO as T
import Network.HTTP.Client (Manager, newManager, defaultManagerSettings)
import Servant.API
import Servant.Client
import System.IO (hFlush, stdout)
-- Client requests run in the ClientM monad (once supplied with their payload,
-- a Manager and a BaseUrl).
-- simple http URl with flexible host, port 8000
-- | Build a 'BaseUrl' by asking the user for the DeployR host name.
-- Scheme (http), port (8000) and path ("") are fixed; the host comes
-- from stdin and fills in @BaseUrl{..}@ via RecordWildCards.
getBaseUrl :: IO BaseUrl
getBaseUrl = do putStr "Enter DeployR host name: "
                -- stdout is typically line-buffered, so without an
                -- explicit flush the prompt would only appear after the
                -- user has already typed the host name
                hFlush stdout
                baseUrlHost <- getLine
                return BaseUrl{..}
  where baseUrlPort = 8000
        baseUrlScheme = Http
        baseUrlPath = ""
-- | A fresh HTTP connection 'Manager' with default (plain-HTTP) settings.
getMgr :: IO Manager
getMgr = newManager defaultManagerSettings
-- | Hard-coded admin credentials used by the interactive test helpers.
-- NOTE(review): credentials embedded in source; acceptable for a demo,
-- never for production code.
itsMe :: LoginData
itsMe = LoginData FormatJSON "admin" "secret" Nothing
-- | Smoke test: log in with the hard-coded credentials and print the
-- server's reply (or the client error).
loginTest :: IO ()
loginTest = do
  mgr  <- getMgr
  base <- getBaseUrl
  runExceptT (login itsMe mgr base) >>= print
-- | run a request with the given cookie to authenticate
-- | Run one client request, authenticating with the given session cookie
-- (sent as a @JSESSIONID@ header value), and print the outcome.
withCookieRun :: (FromJSONPayload b, Show b) =>
                 BaseUrl ->
                 Text ->
                 (a -> Maybe Text -> Manager -> BaseUrl -> ClientM (DRResponse b)) ->
                 a ->
                 IO ()
withCookieRun base cookie req reqData = do
  mgr <- getMgr
  let authCookie = Just (T.concat ["JSESSIONID=", cookie])
  runExceptT (req reqData authCookie mgr base) >>= print
-- we would like an "inSession" combinator to run a ClientM with an
-- authentication header on each action (the httpcookie)
-- | run a request after logging in, and log out afterwards
-- Example use:
-- *DeployR> loggedInAs itsMe (execCode (ExecCode{...} )) $ BaseUrl{...}
-- | Run a request after logging in, and log out afterwards.
--
-- Fix: the original skipped the logout call whenever the request failed
-- (its own comment said so).  The request is now wrapped in 'catchE' so
-- the session is logged out on both the success and the failure path;
-- on failure the original error is re-thrown with 'throwE'.
--
-- Example use:
--   *DeployR> loggedInAs itsMe (execCode (ExecCode{...} )) $ BaseUrl{...}
loggedInAs :: LoginData ->
              (Maybe Text -> Manager -> BaseUrl -> ClientM (DRResponse b)) ->
              BaseUrl ->
              IO (Either ServantError (DRResponse b))
loggedInAs loginData request baseUrl = do
  mgr <- getMgr
  runExceptT $ do
    loggedIn  <- login loginData mgr baseUrl
    cookieStr <- maybe (fail "no cookie on login")
                       (return . T.append "JSESSIONID=")
                       (drCookie loggedIn)
    let sessionCookie = Just cookieStr
        doLogout = logout (LogoutData FormatJSON Nothing) sessionCookie mgr baseUrl
    -- run the request; if it fails, still log out, then rethrow
    result <- request sessionCookie mgr baseUrl
                `catchE` \err -> doLogout >> throwE err
    _ <- doLogout
    return result
| jberthold/deployR-hs | src/DeployR.hs | apache-2.0 | 2,796 | 0 | 16 | 693 | 594 | 311 | 283 | 58 | 1 |
-- main = putStrLn "hello, world outside ghc"
-- main = do
-- putStrLn "Hello, what's your name?"
-- name <- getLine
-- putStrLn ("Hey " ++ name ++ ", you rock!")
-- import Data.Char
--
-- main = do
-- putStrLn "What's your first name?"
-- firstName <- getLine
-- putStrLn "What's your last name"
-- lastName <- getLine
-- let bigFirstName = map toUpper firstName
-- bigLastName = map toUpper lastName
-- putStrLn $ "hey " ++ bigFirstName ++ " " ++ bigLastName ++ ", how are you?"
-- read a line and print it reversed, repeating until an empty line is entered
-- main = do
-- line <- getLine
-- if null line
-- then return ()
-- else do
-- putStrLn $ reverseWords line
-- main
-- -- else (do
-- -- putStrLn $ reverseWords line
-- -- main)
--
-- reverseWords :: String -> String
-- reverseWords = unwords. map reverse. words -- function composer
-- -- reverseWords st = unwords (map reverse (words st))
-- main = do
-- return ()
-- return "HAHAHA"
-- line <- getLine
-- return "BLAH BLAH BLAH"
-- return 42
-- putStrLn line
-- main = do
-- a <- return "Cool"
-- b <- return "Haskell"
-- putStrLn $ a ++ " " ++ b
-- -- 类似 let a = "Cool"
-- main = do
-- c <- getChar
-- if c /= ' '
-- then do
-- putChar c
-- main
-- else return ()
--
-- The above can be rewritten with `when`:
-- import Control.Monad
--
-- main = do
-- c <- getChar
-- when (c /= ' ') $ do
-- putChar c
-- main
-- main = do
-- rs <- sequence [getLine, getLine, getLine]
-- print rs
-- forever
import Control.Monad
import Data.Char
-- Repeatedly prompt, read a line, and echo it back upper-cased.
-- ('forever' never returns; the program ends only on EOF/interrupt.)
main = forever
  (putStr "Give me some input: " >> fmap (map toUpper) getLine >>= putStrLn)
-- forM mapM
--
-- import Control.Monad
--
-- main = do
-- colors <- forM [1,2,3,4] (\a -> do
-- putStrLn $ "Which color do you associate with the number " ++ show a ++ "?"
-- color <- getLine
-- return color)
-- putStrLn "The colors that you associate with 1, 2, 3 and 4 are:"
-- -- mapM putStrLn colors
-- forM colors putStrLn
| sharkspeed/dororis | languages/haskell/LYHGG/9-input-and-output/helloworld.hs | bsd-2-clause | 2,144 | 0 | 9 | 649 | 119 | 95 | 24 | 6 | 1 |
-- | Primitive polynomial sequences.
--
-- <https://en.wikipedia.org/wiki/Primitive_polynomial_(field_theory)>
-- <http://www.ams.org/journals/mcom/1962-16-079/S0025-5718-1962-0148256-1/S0025-5718-1962-0148256-1.pdf>
module PrimitivePolynomial
( sequence
, Gen(next)
) where
import Data.Bits (FiniteBits(finiteBitSize), Bits(shiftL, shiftR, testBit, xor))
import Data.Bool (bool)
import Data.Int (Int8, Int16, Int32, Int64)
import qualified Data.List as List
import Prelude hiding (sequence)
import Data.Word (Word8, Word16, Word32, Word64)
-- | An infinite, cycling enumeration of the positive values in the domain.
sequence :: (Gen α, Num α) => [α]
sequence =
  iterate' next 1

-- | Strict variant of 'Prelude.iterate': each element is forced before
-- being consed, so consumers cannot accumulate a chain of 'next' thunks.
iterate' :: (a -> a) -> a -> [a]
iterate' f =
  go where go z = z `seq` z : go (f z)
-- | 'FiniteBits' and 'Num' are approximations of 'finite' and 'field'
-- respectively, as we don't have either in Haskell.
class (FiniteBits α, Num α) => Gen α where
  next :: α -> α

-- The tap lists below appear to encode the exponents of a primitive
-- polynomial for each word width (see the references in the module
-- header) -- TODO confirm against the cited tables.
instance Gen Int8 where
  next = gnext [0, 1] 2
instance Gen Int16 where
  next = gnext [0, 1] 2
instance Gen Int32 where
  next = gnext [0, 3] 2
instance Gen Int64 where
  next = gnext [0, 1] 2
instance Gen Word8 where
  next = gnext [0, 2, 3, 4] 1
instance Gen Word16 where
  next = gnext [0, 2, 3, 5] 1
instance Gen Word32 where
  next = gnext [0, 1, 2, 3, 5, 7] 1
instance Gen Word64 where
  next = gnext [0, 1, 3, 4] 1
-- | One shift-register step: xor together the tapped bits of @n@ to get
-- a single feedback bit, place it @k@ positions below the top of the
-- word, and add the remaining bits of @n@ shifted down by one.
gnext :: (FiniteBits α, Num α) => [Int] -> Int -> α -> α
gnext taps k n = (feedback `shiftL` topPos) + shifted
  where
    topPos   = finiteBitSize n - k
    shifted  = n `shiftR` 1
    feedback = bool 0 1 parity
    -- parity (xor-fold) of the bits of n selected by the tap positions
    parity   = List.foldl' (\acc i -> acc `xor` testBit n i) False taps
| supki/primitive-polynomial | src/PrimitivePolynomial.hs | bsd-2-clause | 1,698 | 0 | 14 | 379 | 588 | 339 | 249 | 42 | 1 |
module Exercises7 where
-- Multiple Choice
-- 1. d
-- 2. b
-- 3. d
-- 4. b
-- 5. a
-- Let's write code
-- 1.
-- a)
-- | The tens digit of a number, e.g. @tensDigit 123 == 2@.
-- (Same divMod-based semantics as before, spelled with div/mod directly.)
tensDigit :: Integral a => a -> a
tensDigit n = n `div` 10 `mod` 10
-- b) Yes, the type signature is the same
-- c)
-- | The hundreds digit of a number, e.g. @hunsDigit 12345 == 3@.
hunsDigit :: Integral a => a -> a
hunsDigit n = n `div` 100 `mod` 10
-- 2.
-- pattern match:
-- Three equivalent implementations of Data.Bool's 'bool' (with flipped
-- intent: pick the first argument on True).  The exercise is precisely
-- to write the same function in three styles, so each is kept distinct.
foldBool3 :: a -> a -> Bool -> a
foldBool3 x _ True = x
foldBool3 _ y False = y
-- case version:
foldBool' :: a -> a -> Bool -> a
foldBool' x y t = case t of
  True -> x
  False -> y
-- guard version:
-- note, 't = x' works because if t == True, the guard is True
foldBool'' :: a -> a -> Bool -> a
foldBool'' x y t
  | t = x
  | otherwise = y
-- 3.
-- | Apply a function to the first component of a pair, leaving the
-- second untouched (i.e. 'Data.Bifunctor.first' for pairs).
g :: (a -> b) -> (a, c) -> (b, c)
g transform (left, right) = (transform left, right)
-- Main> g show (1, 2)
-- Main> ("1", 2)
-- 4.
-- see: arith4.hs
-- 5.
-- | Round-trip a value through its 'Show' representation and back.
-- For lawful Show/Read pairs this is the identity.
roundTripPF :: (Show a, Read a) => a -> a
roundTripPF x = read (show x)
-- 6.
-- | Like 'roundTripPF' but the result type may differ from the input
-- type; the caller picks @b@ with a type annotation.
roundTrip' :: (Show a, Read b) => a -> b
roundTrip' x = read (show x)
-- | Print a round-tripped 4 (read back at type Int), then plain 4.
main :: IO ()
main = print ((roundTrip' 4) :: Int) >> print (id 4)
{-
(c) The AQUA Project, Glasgow University, 1994-1998
\section[ErrsUtils]{Utilities for error reporting}
-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE BangPatterns #-}
module ErrUtils (
-- * Basic types
Validity(..), andValid, allValid, isValid, getInvalids,
Severity(..),
-- * Messages
ErrMsg, errMsgDoc,
ErrDoc, errDoc, errDocImportant, errDocContext, errDocSupplementary,
WarnMsg, MsgDoc,
Messages, ErrorMessages, WarningMessages,
unionMessages,
errMsgSpan, errMsgContext,
errorsFound, isEmptyMessages,
isWarnMsgFatal,
-- ** Formatting
pprMessageBag, pprErrMsgBagWithLoc,
pprLocErrMsg, printBagOfErrors,
formatErrDoc,
-- ** Construction
emptyMessages, mkLocMessage, mkLocMessageAnn, makeIntoWarning,
mkErrMsg, mkPlainErrMsg, mkErrDoc, mkLongErrMsg, mkWarnMsg,
mkPlainWarnMsg,
warnIsErrorMsg, mkLongWarnMsg,
-- * Utilities
doIfSet, doIfSet_dyn,
getCaretDiagnostic,
-- * Dump files
dumpIfSet, dumpIfSet_dyn, dumpIfSet_dyn_printer,
mkDumpDoc, dumpSDoc,
-- * Issuing messages during compilation
putMsg, printInfoForUser, printOutputForUser,
logInfo, logOutput,
errorMsg, warningMsg,
fatalErrorMsg, fatalErrorMsg', fatalErrorMsg'',
compilationProgressMsg,
showPass, withTiming,
debugTraceMsg,
ghcExit,
prettyPrintGhcErrors,
) where
#include "HsVersions.h"
import Bag
import Exception
import Outputable
import Panic
import SrcLoc
import DynFlags
import FastString (unpackFS)
import StringBuffer (hGetStringBuffer, len, lexemeToString)
import System.Directory
import System.Exit ( ExitCode(..), exitWith )
import System.FilePath ( takeDirectory, (</>) )
import Data.List
import qualified Data.Set as Set
import Data.IORef
import Data.Maybe ( fromMaybe )
import Data.Monoid ( mappend )
import Data.Ord
import Data.Time
import Control.Monad
import Control.Monad.IO.Class
import System.IO
import System.IO.Error ( catchIOError )
import GHC.Conc ( getAllocationCounter )
import System.CPUTime
-------------------------
-- | Shorthand for a pretty-printable message body.
type MsgDoc = SDoc

-------------------------
-- | The outcome of a validity check.
data Validity
  = IsValid            -- ^ Everything is fine
  | NotValid MsgDoc    -- ^ A problem, and some indication of why

-- | True iff the check succeeded.
isValid :: Validity -> Bool
isValid IsValid      = True
isValid (NotValid _) = False

-- | Keep the left-most failure; succeed only when both sides do.
andValid :: Validity -> Validity -> Validity
andValid IsValid v = v
andValid v _ = v

-- | If they aren't all valid, return the first failure.
allValid :: [Validity] -> Validity
allValid = foldr andValid IsValid

-- | The explanations of all the failed checks.
getInvalids :: [Validity] -> [MsgDoc]
getInvalids vs = [ why | NotValid why <- vs ]
-- -----------------------------------------------------------------------------
-- Basic error messages: just render a message with a source location.
-- | Warnings and errors collected during compilation, kept separate.
type Messages = (WarningMessages, ErrorMessages)
type WarningMessages = Bag WarnMsg
type ErrorMessages = Bag ErrMsg

-- | Merge two message collections component-wise.
unionMessages :: Messages -> Messages -> Messages
unionMessages (warns1, errs1) (warns2, errs2) =
  (unionBags warns1 warns2, unionBags errs1 errs2)
-- | A diagnostic: where it arose, how to qualify names when printing it,
-- its (sectioned) body, and how severe it is.
data ErrMsg = ErrMsg {
        errMsgSpan        :: SrcSpan,
        errMsgContext     :: PrintUnqualified,
        errMsgDoc         :: ErrDoc,
        -- | This has the same text as errDocImportant . errMsgDoc.
        errMsgShortString :: String,
        errMsgSeverity    :: Severity,
        errMsgReason      :: WarnReason
        }
        -- The SrcSpan is used for sorting errors into line-number order

-- | Categorise error msgs by their importance.  This is so each section can
-- be rendered visually distinct.  See Note [Error report] for where these come
-- from.
data ErrDoc = ErrDoc {
        -- | Primary error msg.
        errDocImportant :: [MsgDoc],
        -- | Context e.g. \"In the second argument of ...\".
        errDocContext :: [MsgDoc],
        -- | Supplementary information, e.g. \"Relevant bindings include ...\".
        errDocSupplementary :: [MsgDoc]
        }

errDoc :: [MsgDoc] -> [MsgDoc] -> [MsgDoc] -> ErrDoc
errDoc = ErrDoc
type WarnMsg = ErrMsg

-- | How serious a logged message is; drives colouring, whether a
-- file/line/column heading is attached, and the intended audience.
data Severity
  = SevOutput
  | SevFatal
  | SevInteractive

  | SevDump
    -- ^ Log messages intended for compiler developers
    -- No file/line/column stuff

  | SevInfo
    -- ^ Log messages intended for end users.
    -- No file/line/column stuff.

  | SevWarning
  | SevError
    -- ^ SevWarning and SevError are used for warnings and errors
    --   o The message has a file/line/column heading,
    --     plus "warning:" or "error:",
    --     added by mkLocMessage
    --   o Output is intended for end users

-- 'show' yields only the pre-rendered important text (for debugging).
instance Show ErrMsg where
    show em = errMsgShortString em

-- | Render a bag of message bodies separated by blank lines.
pprMessageBag :: Bag MsgDoc -> SDoc
pprMessageBag msgs = vcat (punctuate blankLine (bagToList msgs))
-- | Make an unannotated error message with location info.
mkLocMessage :: Severity -> SrcSpan -> MsgDoc -> MsgDoc
mkLocMessage = mkLocMessageAnn Nothing

-- | Make a possibly annotated error message with location info.
mkLocMessageAnn
  :: Maybe String                       -- ^ optional annotation
  -> Severity                           -- ^ severity
  -> SrcSpan                            -- ^ location
  -> MsgDoc                             -- ^ message
  -> MsgDoc
  -- Always print the location, even if it is unhelpful.  Error messages
  -- are supposed to be in a standard format, and one without a location
  -- would look strange.  Better to say explicitly "<no location info>".
mkLocMessageAnn ann severity locn msg
    = sdocWithDynFlags $ \dflags ->
      let locn' = if gopt Opt_ErrorSpans dflags
                  then ppr locn
                  else ppr (srcSpanStart locn)

          -- Add prefixes, like    Foo.hs:34: warning:
          --                           <the warning message>
          prefix = locn' <> colon <+>
                   coloured sevColour sevText <> optAnn

      in bold (hang prefix 4 msg)
  where
    sevColour = colBold `mappend` getSeverityColour severity

    sevText =
      case severity of
        SevWarning -> text "warning:"
        SevError -> text "error:"
        SevFatal -> text "fatal:"
        _ -> empty

    -- Add optional information
    optAnn = case ann of
      Nothing -> text ""
      Just i -> text " [" <> coloured sevColour (text i) <> text "]"
-- | Colour for the severity keyword: magenta for warnings, red for
-- (fatal) errors, no colouring for anything else.
getSeverityColour :: Severity -> PprColour
getSeverityColour sev =
  case sev of
    SevWarning -> colMagentaFg
    SevError   -> colRedFg
    SevFatal   -> colRedFg
    _          -> mempty
-- | Build the "caret diagnostic" shown under an error: the offending
-- source line with the erroneous span highlighted by a row of carets.
-- Yields 'empty' for unhelpful spans or unreadable files.
getCaretDiagnostic :: Severity -> SrcSpan -> IO MsgDoc
getCaretDiagnostic _ (UnhelpfulSpan _) = pure empty
getCaretDiagnostic severity (RealSrcSpan span) = do
  caretDiagnostic <$> getSrcLine (srcSpanFile span) (row - 1)
  where
    -- read line i (0-based) of the file; Nothing on any IO failure
    getSrcLine fn i = do
      (getLine i <$> readFile' (unpackFS fn))
        `catchIOError` \ _ ->
          pure Nothing

    getLine i contents =
      case drop i (lines contents) of
        srcLine : _ -> Just srcLine
        [] -> Nothing

    readFile' fn = do
      -- StringBuffer has advantages over readFile:
      -- (a) no lazy IO, otherwise IO exceptions may occur in pure code
      -- (b) always UTF-8, rather than some system-dependent encoding
      --     (Haskell source code must be UTF-8 anyway)
      buf <- hGetStringBuffer fn
      pure (fix <$> lexemeToString buf (len buf))

    -- allow user to visibly see that their code is incorrectly encoded
    -- (StringBuffer.nextChar uses \0 to represent undecodable characters)
    fix '\0' = '\xfffd'
    fix c = c

    sevColour = colBold `mappend` getSeverityColour severity
    marginColour = colBold `mappend` colBlueFg

    row = srcSpanStartLine span
    rowStr = show row
    multiline = row /= srcSpanEndLine span

    stripNewlines = filter (/= '\n')

    caretDiagnostic Nothing = empty
    caretDiagnostic (Just srcLineWithNewline) =
      coloured marginColour (text marginSpace) <>
      text ("\n") <>
      coloured marginColour (text marginRow) <>
      text (" " ++ srcLinePre) <>
      coloured sevColour (text srcLineSpan) <>
      text (srcLinePost ++ "\n") <>
      coloured marginColour (text marginSpace) <>
      coloured sevColour (text (" " ++ caretLine))
      where
        srcLine = stripNewlines srcLineWithNewline
        start = srcSpanStartCol span - 1
        -- for multi-line spans, highlight to the end of the first line
        end | multiline = length srcLine
            | otherwise = srcSpanEndCol span - 1
        width = max 1 (end - start)

        marginWidth = length rowStr
        marginSpace = replicate marginWidth ' ' ++ " |"
        marginRow = rowStr ++ " |"

        (srcLinePre, srcLineRest) = splitAt start srcLine
        (srcLineSpan, srcLinePost) = splitAt width srcLineRest

        caretEllipsis | multiline = "..."
                      | otherwise = ""
        caretLine = replicate start ' ' ++ replicate width '^' ++ caretEllipsis
-- | Downgrade a message to a warning, recording why it was emitted.
makeIntoWarning :: WarnReason -> ErrMsg -> ErrMsg
makeIntoWarning reason err = err
    { errMsgSeverity = SevWarning
    , errMsgReason = reason }
-- -----------------------------------------------------------------------------
-- Collecting up messages for later ordering and printing.
-- | Shared constructor behind the @mk*Msg@ family: pre-renders the short
-- string from the important section and defaults the reason to 'NoReason'.
mk_err_msg :: DynFlags -> Severity -> SrcSpan -> PrintUnqualified -> ErrDoc -> ErrMsg
mk_err_msg dflags sev locn print_unqual doc
 = ErrMsg { errMsgSpan = locn
          , errMsgContext = print_unqual
          , errMsgDoc = doc
          , errMsgShortString = showSDoc dflags (vcat (errDocImportant doc))
          , errMsgSeverity = sev
          , errMsgReason = NoReason }

mkErrDoc :: DynFlags -> SrcSpan -> PrintUnqualified -> ErrDoc -> ErrMsg
mkErrDoc dflags = mk_err_msg dflags SevError

mkLongErrMsg, mkLongWarnMsg :: DynFlags -> SrcSpan -> PrintUnqualified -> MsgDoc -> MsgDoc -> ErrMsg
-- ^ A long (multi-line) error message
mkErrMsg, mkWarnMsg :: DynFlags -> SrcSpan -> PrintUnqualified -> MsgDoc -> ErrMsg
-- ^ A short (one-line) error message
mkPlainErrMsg, mkPlainWarnMsg :: DynFlags -> SrcSpan -> MsgDoc -> ErrMsg
-- ^ Variant that doesn't care about qualified/unqualified names

mkLongErrMsg dflags locn unqual msg extra = mk_err_msg dflags SevError locn unqual (ErrDoc [msg] [] [extra])
mkErrMsg dflags locn unqual msg = mk_err_msg dflags SevError locn unqual (ErrDoc [msg] [] [])
mkPlainErrMsg dflags locn msg = mk_err_msg dflags SevError locn alwaysQualify (ErrDoc [msg] [] [])
mkLongWarnMsg dflags locn unqual msg extra = mk_err_msg dflags SevWarning locn unqual (ErrDoc [msg] [] [extra])
mkWarnMsg dflags locn unqual msg = mk_err_msg dflags SevWarning locn unqual (ErrDoc [msg] [] [])
mkPlainWarnMsg dflags locn msg = mk_err_msg dflags SevWarning locn alwaysQualify (ErrDoc [msg] [] [])
----------------
emptyMessages :: Messages
emptyMessages = (emptyBag, emptyBag)

isEmptyMessages :: Messages -> Bool
isEmptyMessages (warns, errs) = isEmptyBag warns && isEmptyBag errs

-- | The error emitted when warnings are promoted to errors by @-Werror@.
warnIsErrorMsg :: DynFlags -> ErrMsg
warnIsErrorMsg dflags
    = mkPlainErrMsg dflags noSrcSpan (text "\nFailing due to -Werror.")

-- | Only hard errors count; warnings alone do not stop compilation.
errorsFound :: DynFlags -> Messages -> Bool
errorsFound _dflags (_warns, errs) = not (isEmptyBag errs)
-- | Emit every message in the bag through 'log_action', sorted into
-- source-location order, each rendered in its own qualification context.
printBagOfErrors :: DynFlags -> Bag ErrMsg -> IO ()
printBagOfErrors dflags bag_of_errors
  = sequence_ [ let style = mkErrStyle dflags unqual
                in log_action dflags dflags reason sev s style (formatErrDoc dflags doc)
              | ErrMsg { errMsgSpan      = s,
                         errMsgDoc       = doc,
                         errMsgSeverity  = sev,
                         errMsgReason    = reason,
                         errMsgContext   = unqual } <- sortMsgBag (Just dflags)
                                                                  bag_of_errors ]
-- | Assemble an 'ErrDoc' for display: empty sections are dropped, and
-- when more than one section survives each is rendered as a bullet item.
formatErrDoc :: DynFlags -> ErrDoc -> SDoc
formatErrDoc dflags (ErrDoc important context supplementary)
  = case msgs of
        [msg] -> vcat msg
        _ -> vcat $ map starred msgs
    where
    msgs = filter (not . null) $ map (filter (not . Outputable.isEmpty dflags))
        [important, context, supplementary]
    starred = (bullet<+>) . vcat
    -- unicode bullet when the terminal supports it, otherwise '*'
    bullet = text $ if DynFlags.useUnicode dflags then "•" else "*"
-- | Render every message in the bag (in source order), with location.
pprErrMsgBagWithLoc :: Bag ErrMsg -> [SDoc]
pprErrMsgBagWithLoc bag = map pprLocErrMsg (sortMsgBag Nothing bag)
-- | Render one message with its location heading, in the message's own
-- qualification context.
pprLocErrMsg :: ErrMsg -> SDoc
pprLocErrMsg (ErrMsg { errMsgSpan     = s
                     , errMsgDoc      = doc
                     , errMsgSeverity = sev
                     , errMsgContext  = unqual })
  = sdocWithDynFlags $ \dflags ->
    withPprStyle (mkErrStyle dflags unqual) $
    mkLocMessage sev s (formatErrDoc dflags doc)
-- | Flatten a bag of messages into a list sorted by source span.  The
-- order is reversed when @reverseErrors@ is set in the given 'DynFlags'
-- (if any).
sortMsgBag :: Maybe DynFlags -> Bag ErrMsg -> [ErrMsg]
sortMsgBag dflags = sortBy (maybeFlip $ comparing errMsgSpan) . bagToList
  where maybeFlip :: (a -> a -> b) -> (a -> a -> b)
        maybeFlip
          -- idiom fix: 'maybe False reverseErrors' replaces the
          -- roundabout 'fromMaybe False (fmap reverseErrors dflags)'
          | maybe False reverseErrors dflags = flip
          | otherwise = id
-- | Exit the compiler: cleanly for code 0, otherwise report that
-- compilation had errors before exiting with the given code.
ghcExit :: DynFlags -> Int -> IO ()
ghcExit dflags val
  | val == 0 = exitWith ExitSuccess
  | otherwise = do errorMsg dflags (text "\nCompilation had errors\n\n")
                   exitWith (ExitFailure val)
-- | Run the action only when the flag is set.
doIfSet :: Bool -> IO () -> IO ()
doIfSet flag action = when flag action

-- | Run the action only when the given general flag is on in 'DynFlags'.
doIfSet_dyn :: DynFlags -> GeneralFlag -> IO () -> IO()
doIfSet_dyn dflags flag action = when (gopt flag dflags) action
-- -----------------------------------------------------------------------------
-- Dumping
-- | Emit @doc@ (under the given header) on the dump stream, but only
-- when the boolean flag is set.
dumpIfSet :: DynFlags -> Bool -> String -> SDoc -> IO ()
dumpIfSet dflags flag hdr doc =
  when flag $
    log_action dflags
               dflags
               NoReason
               SevDump
               noSrcSpan
               defaultDumpStyle
               (mkDumpDoc hdr doc)
-- | a wrapper around 'dumpSDoc'.
-- First check whether the dump flag is set
-- Do nothing if it is unset
dumpIfSet_dyn :: DynFlags -> DumpFlag -> String -> SDoc -> IO ()
dumpIfSet_dyn dflags flag hdr doc
  = when (dopt flag dflags) $ dumpSDoc dflags alwaysQualify flag hdr doc

-- | a wrapper around 'dumpSDoc'.
-- First check whether the dump flag is set
-- Do nothing if it is unset
--
-- Unlike 'dumpIfSet_dyn',
-- has a printer argument but no header argument
-- (an empty header makes 'dumpSDoc' use its compact format)
dumpIfSet_dyn_printer :: PrintUnqualified
                      -> DynFlags -> DumpFlag -> SDoc -> IO ()
dumpIfSet_dyn_printer printer dflags flag doc
  = when (dopt flag dflags) $ dumpSDoc dflags printer flag "" doc
-- | Wrap a dump body in blank lines and an @==== hdr ====@ banner.
mkDumpDoc :: String -> SDoc -> SDoc
mkDumpDoc hdr doc =
  vcat [ blankLine
       , banner <+> text hdr <+> banner
       , doc
       , blankLine
       ]
  where
    banner = text (replicate 20 '=')
-- | Write out a dump.
-- If --dump-to-file is set then this goes to a file.
-- otherwise emit to stdout.
--
-- When @hdr@ is empty, we print in a more compact format (no separators and
-- blank lines)
--
-- The 'DumpFlag' is used only to choose the filename to use if @--dump-to-file@
-- is used; it is not used to decide whether to dump the output
dumpSDoc :: DynFlags -> PrintUnqualified -> DumpFlag -> String -> SDoc -> IO ()
dumpSDoc dflags print_unqual flag hdr doc
 = do let mFile = chooseDumpFile dflags flag
          dump_style = mkDumpStyle print_unqual
      case mFile of
            Just fileName
                 -> do
                        -- the first dump to a given file this session
                        -- truncates it; later dumps append
                        let gdref = generatedDumps dflags
                        gd <- readIORef gdref
                        let append = Set.member fileName gd
                            mode = if append then AppendMode else WriteMode
                        unless append $
                            writeIORef gdref (Set.insert fileName gd)
                        createDirectoryIfMissing True (takeDirectory fileName)
                        handle <- openFile fileName mode

                        -- We do not want the dump file to be affected by
                        -- environment variables, but instead to always use
                        -- UTF8. See:
                        -- https://ghc.haskell.org/trac/ghc/ticket/10762
                        hSetEncoding handle utf8

                        doc' <- if null hdr
                                then return doc
                                else do t <- getCurrentTime
                                        let d = text (show t)
                                                $$ blankLine
                                                $$ doc
                                        return $ mkDumpDoc hdr d
                        defaultLogActionHPrintDoc dflags handle doc' dump_style
                        hClose handle

            -- write the dump to stdout
            Nothing -> do
              let (doc', severity)
                    | null hdr  = (doc, SevOutput)
                    | otherwise = (mkDumpDoc hdr doc, SevDump)
              log_action dflags dflags NoReason severity noSrcSpan dump_style doc'
-- | Choose where to put a dump file based on DynFlags
--
chooseDumpFile :: DynFlags -> DumpFlag -> Maybe FilePath
chooseDumpFile dflags flag
        -- TH splice dumps always go to a file, even without -ddump-to-file
        | gopt Opt_DumpToFile dflags || flag == Opt_D_th_dec_file
        , Just prefix <- getPrefix
        = Just $ setDir (prefix ++ (beautifyDumpName flag))

        | otherwise
        = Nothing

        where getPrefix
                 -- dump file location is being forced
                 --      by the --ddump-file-prefix flag.
               | Just prefix <- dumpPrefixForce dflags
                  = Just prefix
                 -- dump file location chosen by DriverPipeline.runPipeline
               | Just prefix <- dumpPrefix dflags
                  = Just prefix
                 -- we haven't got a place to put a dump file.
               | otherwise
                  = Nothing
              setDir f = case dumpDir dflags of
                         Just d  -> d </> f
                         Nothing ->       f
-- | Build a nice file name from name of a 'DumpFlag' constructor
-- | Build a nice file name from name of a 'DumpFlag' constructor:
-- strip the @Opt_D_@ prefix and turn underscores into dashes.
beautifyDumpName :: DumpFlag -> String
beautifyDumpName Opt_D_th_dec_file = "th.hs"
beautifyDumpName flag = map underscoreToDash suffix
  where
    str = show flag
    suffix = case stripPrefix "Opt_D_" str of
               Just rest -> rest
               Nothing   -> panic ("Bad flag name: " ++ str)
    -- e.g. "dump_simpl" becomes "dump-simpl"
    underscoreToDash c = if c == '_' then '-' else c
-- -----------------------------------------------------------------------------
-- Outputting messages from the compiler
-- We want all messages to go through one place, so that we can
-- redirect them if necessary. For example, when GHC is used as a
-- library we might want to catch all messages that GHC tries to
-- output and do something else with them.
-- | Run the action only when the verbosity level is at least @val@.
ifVerbose :: DynFlags -> Int -> IO () -> IO ()
ifVerbose dflags val act = when (verbosity dflags >= val) act
-- | Log an error (no source location) in the default error style.
errorMsg :: DynFlags -> MsgDoc -> IO ()
errorMsg dflags msg
   = log_action dflags dflags NoReason SevError noSrcSpan (defaultErrStyle dflags) msg

-- | Log a warning (no source location) in the default error style.
warningMsg :: DynFlags -> MsgDoc -> IO ()
warningMsg dflags msg
   = log_action dflags dflags NoReason SevWarning noSrcSpan (defaultErrStyle dflags) msg

fatalErrorMsg :: DynFlags -> MsgDoc -> IO ()
fatalErrorMsg dflags msg = fatalErrorMsg' (log_action dflags) dflags msg

-- | Like 'fatalErrorMsg' but with an explicit 'LogAction'.
fatalErrorMsg' :: LogAction -> DynFlags -> MsgDoc -> IO ()
fatalErrorMsg' la dflags msg =
    la dflags NoReason SevFatal noSrcSpan (defaultErrStyle dflags) msg

fatalErrorMsg'' :: FatalMessager -> String -> IO ()
fatalErrorMsg'' fm msg = fm msg
-- | Progress message, shown at verbosity 1 and above.
compilationProgressMsg :: DynFlags -> String -> IO ()
compilationProgressMsg dflags msg
  = ifVerbose dflags 1 $
    logOutput dflags defaultUserStyle (text msg)

-- | Phase banner ("*** <phase>:"), shown at verbosity 2 and above.
showPass :: DynFlags -> String -> IO ()
showPass dflags what
  = ifVerbose dflags 2 $
    logInfo dflags defaultUserStyle (text "***" <+> text what <> colon)
-- | Time a compilation phase.
--
-- When timings are enabled (e.g. with the @-v2@ flag), the allocations
-- and CPU time used by the phase will be reported to stderr. Consider
-- a typical usage: @withTiming getDynFlags (text "simplify") force pass@.
-- When timings are enabled the following costs are included in the
-- produced accounting,
--
-- - The cost of executing @pass@ to a result @r@ in WHNF
-- - The cost of evaluating @force r@ to WHNF (e.g. @()@)
--
-- The choice of the @force@ function depends upon the amount of forcing
-- desired; the goal here is to ensure that the cost of evaluating the result
-- is, to the greatest extent possible, included in the accounting provided by
-- 'withTiming'. Often the pass already sufficiently forces its result during
-- construction; in this case @const ()@ is a reasonable choice.
-- In other cases, it is necessary to evaluate the result to normal form, in
-- which case something like @Control.DeepSeq.rnf@ is appropriate.
--
-- To avoid adversely affecting compiler performance when timings are not
-- requested, the result is only forced when timings are enabled.
withTiming :: MonadIO m
           => m DynFlags   -- ^ A means of getting a 'DynFlags' (often
                           -- 'getDynFlags' will work here)
           -> SDoc         -- ^ The name of the phase
           -> (a -> ())    -- ^ A function to force the result
                           -- (often either @const ()@ or 'rnf')
           -> m a          -- ^ The body of the phase to be timed
           -> m a
withTiming getDFlags what force_result action
  = do dflags <- getDFlags
       if verbosity dflags >= 2
          then do liftIO $ logInfo dflags defaultUserStyle
                         $ text "***" <+> what <> colon
                  alloc0 <- liftIO getAllocationCounter
                  start <- liftIO getCPUTime
                  -- the bang plus forcing below ensures the cost of the
                  -- pass (and of forcing its result) lands inside the
                  -- start/end measurements
                  !r <- action
                  () <- pure $ force_result r
                  end <- liftIO getCPUTime
                  alloc1 <- liftIO getAllocationCounter
                  -- recall that allocation counter counts down
                  let alloc = alloc0 - alloc1
                  liftIO $ logInfo dflags defaultUserStyle
                      (text "!!!" <+> what <> colon <+> text "finished in"
                       -- getCPUTime is in picoseconds; * 1e-9 -> milliseconds
                       <+> doublePrec 2 (realToFrac (end - start) * 1e-9)
                       <+> text "milliseconds"
                       <> comma
                       <+> text "allocated"
                       <+> doublePrec 3 (realToFrac alloc / 1024 / 1024)
                       <+> text "megabytes")
                  pure r
          else action
-- | Log in the dump style when verbosity is at least @val@.
debugTraceMsg :: DynFlags -> Int -> MsgDoc -> IO ()
debugTraceMsg dflags val msg = ifVerbose dflags val $
                               logInfo dflags defaultDumpStyle msg

putMsg :: DynFlags -> MsgDoc -> IO ()
putMsg dflags msg = logInfo dflags defaultUserStyle msg

printInfoForUser :: DynFlags -> PrintUnqualified -> MsgDoc -> IO ()
printInfoForUser dflags print_unqual msg
  = logInfo dflags (mkUserStyle print_unqual AllTheWay) msg

printOutputForUser :: DynFlags -> PrintUnqualified -> MsgDoc -> IO ()
printOutputForUser dflags print_unqual msg
  = logOutput dflags (mkUserStyle print_unqual AllTheWay) msg

logInfo :: DynFlags -> PprStyle -> MsgDoc -> IO ()
logInfo dflags sty msg
  = log_action dflags dflags NoReason SevInfo noSrcSpan sty msg

logOutput :: DynFlags -> PprStyle -> MsgDoc -> IO ()
-- ^ Like 'logInfo' but with 'SevOutput' rather than 'SevInfo'
logOutput dflags sty msg
  = log_action dflags dflags NoReason SevOutput noSrcSpan sty msg
-- | Run an action, rendering any GHC panic/sorry/program-error exception
-- through the pretty-printer; other exceptions are rethrown unchanged.
prettyPrintGhcErrors :: ExceptionMonad m => DynFlags -> m a -> m a
prettyPrintGhcErrors dflags
    = ghandle $ \e -> case e of
        PprPanic str doc ->
            pprDebugAndThen dflags panic (text str) doc
        PprSorry str doc ->
            pprDebugAndThen dflags sorry (text str) doc
        PprProgramError str doc ->
            pprDebugAndThen dflags pgmError (text str) doc
        _ ->
            liftIO $ throwIO e
-- | Checks if given 'WarnMsg' is a fatal warning: a flag-attributed
-- warning is fatal when its flag is marked fatal; anything else is
-- fatal only under @-Werror@.
isWarnMsgFatal :: DynFlags -> WarnMsg -> Bool
isWarnMsgFatal dflags msg =
  case errMsgReason msg of
    Reason wflag -> wopt_fatal wflag dflags
    _            -> gopt Opt_WarnIsError dflags
| olsner/ghc | compiler/main/ErrUtils.hs | bsd-3-clause | 24,597 | 0 | 24 | 7,387 | 5,276 | 2,750 | 2,526 | 424 | 6 |
{-# LANGUAGE CPP #-}
#if __GLASGOW_HASKELL__ >= 709
{-# LANGUAGE AutoDeriveTypeable #-}
#endif
-----------------------------------------------------------------------------
-- |
-- Module : Data.Functor.Reverse
-- Copyright : (c) Russell O'Connor 2009
-- License : BSD-style (see the file LICENSE)
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- Making functors whose elements are notionally in the reverse order
-- from the original functor.
-----------------------------------------------------------------------------
module Data.Functor.Reverse (
Reverse(..),
) where
import Control.Applicative.Backwards
import Data.Functor.Classes
import Prelude hiding (foldr, foldr1, foldl, foldl1)
import Control.Applicative
import Data.Foldable
import Data.Traversable
import Data.Monoid
-- | The same functor, but with 'Foldable' and 'Traversable' instances
-- that process the elements in the reverse order.
newtype Reverse f a = Reverse { getReverse :: f a }

-- Equality, ordering, parsing and printing all delegate to the wrapped
-- functor via the *1 (lifted) classes; 'Reverse' itself adds nothing.
instance (Eq1 f, Eq a) => Eq (Reverse f a) where
    Reverse x == Reverse y = eq1 x y

instance (Ord1 f, Ord a) => Ord (Reverse f a) where
    compare (Reverse x) (Reverse y) = compare1 x y

instance (Read1 f, Read a) => Read (Reverse f a) where
    readsPrec = readsData $ readsUnary1 "Reverse" Reverse

instance (Show1 f, Show a) => Show (Reverse f a) where
    showsPrec d (Reverse x) = showsUnary1 "Reverse" d x

instance (Eq1 f) => Eq1 (Reverse f) where eq1 = (==)
instance (Ord1 f) => Ord1 (Reverse f) where compare1 = compare
instance (Read1 f) => Read1 (Reverse f) where readsPrec1 = readsPrec
instance (Show1 f) => Show1 (Reverse f) where showsPrec1 = showsPrec
-- | Derived instance.
-- Note: Functor/Applicative/Alternative simply lift the wrapped
-- functor's operations; only Foldable/Traversable reverse the order.
instance (Functor f) => Functor (Reverse f) where
    fmap f (Reverse a) = Reverse (fmap f a)

-- | Derived instance.
instance (Applicative f) => Applicative (Reverse f) where
    pure a = Reverse (pure a)
    Reverse f <*> Reverse a = Reverse (f <*> a)

-- | Derived instance.
instance (Alternative f) => Alternative (Reverse f) where
    empty = Reverse empty
    Reverse x <|> Reverse y = Reverse (x <|> y)
-- | Fold from right to left.
instance (Foldable f) => Foldable (Reverse f) where
    -- 'Dual' flips the monoid's (<>), which reverses accumulation order
    foldMap f (Reverse t) = getDual (foldMap (Dual . f) t)
    -- each right fold becomes a left fold on the underlying structure
    -- (and vice versa), with the combining function flipped
    foldr f z (Reverse t) = foldl (flip f) z t
    foldl f z (Reverse t) = foldr (flip f) z t
    foldr1 f (Reverse t) = foldl1 (flip f) t
    foldl1 f (Reverse t) = foldr1 (flip f) t

-- | Traverse from right to left.
instance (Traversable f) => Traversable (Reverse f) where
    -- 'Backwards' reverses the order in which effects are sequenced
    traverse f (Reverse t) =
        fmap Reverse . forwards $ traverse (Backwards . f) t
    sequenceA (Reverse t) =
        fmap Reverse . forwards $ sequenceA (fmap Backwards t)
| DavidAlphaFox/ghc | libraries/transformers/Data/Functor/Reverse.hs | bsd-3-clause | 2,744 | 0 | 10 | 549 | 879 | 464 | 415 | 42 | 0 |
{-# LANGUAGE MultiParamTypeClasses, TypeSynonymInstances, FlexibleInstances, DeriveDataTypeable #-}
{-
** *********************************************************************
* *
* (c) Kathleen Fisher <[email protected]> *
* John Launchbury <[email protected]> *
* *
************************************************************************
-}
module Language.Pads.CoreBaseTypes where
import Language.Pads.Generic
import Language.Pads.MetaData
import Language.Pads.PadsParser
import Language.Pads.RegExp
import qualified Language.Pads.Source as S
import qualified Language.Pads.Errors as E
import qualified Data.ByteString as B
import Language.Pads.LazyList
import Language.Haskell.TH as TH
import Language.Haskell.TH.Syntax
import Data.Data
import qualified Data.Map as M
import qualified Data.List as List
import Data.Word
import Data.Char as Char
import Data.Int
import Text.PrettyPrint.Mainland as PP
import Control.Monad (when)
-----------------------------------------------------------------
--type Char
-- | Metadata for a parsed 'Char' is just the base metadata.
type Char_md = Base_md

-- | Parse exactly one character from the input; end-of-file and
-- end-of-record are reported through the usual handlers.
char_parseM :: PadsParser (Char, Base_md)
char_parseM =
  handleEOF def "Char" $
  handleEOR def "Char" $ do
    c <- takeHeadP
    returnClean c
instance Pads Char Base_md where
  parsePP = char_parseM
  printFL = char_printFL

-- | Print a character (metadata ignored) as a one-character string.
char_printFL :: (Char, Base_md) -> FList
char_printFL (c,bmd) = addString [c]
-----------------------------------------------------------------
--type Int
type Int_md = Base_md

-- | Parse a decimal integer: an optional leading minus sign followed by
-- one or more digits.  Reports an error if no digits are present.
int_parseM :: PadsParser (Int,Base_md)
int_parseM =
  handleEOF def "Int" $
  handleEOR def "Int" $ do
    c <- peekHeadP
    let isNeg = (c == '-')
    -- consume the sign only after peeking it
    when isNeg (takeHeadP >> return ())
    digits <- satisfy Char.isDigit
    if not (null digits)
      then returnClean (digitListToInt isNeg digits)
      else returnError def (E.FoundWhenExpecting (mkStr c) "Int")
instance Pads Int Base_md where
  parsePP = int_parseM
  printFL = int_printFL

-- | Print an integer (metadata ignored) via 'show'.
int_printFL :: (Int, Base_md) -> FList
int_printFL (i, bmd) = fshow i
-----------------------------------------------------------------
--type Double

type Double_md = Base_md

-- | Parse a floating-point number of the shape
-- @[-]digits[.digits][e[-]digits]@ and 'read' it as a Double.
--
-- NOTE(review): only lowercase 'e' and a '-' exponent sign are
-- recognised ('E' and '+' are not) — confirm that is intended.
-- NOTE(review): an 'e' with no following digits (e.g. "1e") would make
-- the final 'read' fail at runtime — verify upstream input guarantees.
double_parseM :: PadsParser (Double,Base_md)
double_parseM =
  handleEOF def "Double" $
  handleEOR def "Double" $ do
    -- Get leading sign
    c <- peekHeadP
    let isNeg = (c == '-')
    when isNeg (takeHeadP >> return ())
    let sign = if isNeg then "-" else ""
    -- Get digits before any dot
    digits1 <- satisfy Char.isDigit
    -- Get optional dot
    d <- peekHeadP
    let hasDot = (d == '.')
    when hasDot (takeHeadP >> return ())
    let dec = if hasDot then "." else ""
    -- Get digits after dot
    digits2 <- satisfy Char.isDigit
    -- Get optional exponent marker
    e <- peekHeadP
    let hasExp = (e == 'e')
    when hasExp (takeHeadP >> return ())
    let exp = if hasExp then "e" else ""   -- shadows Prelude.exp (harmless here)
    -- Get optional exponent sign
    es <- peekHeadP
    let hasESign = (es == '-')
    when hasESign (takeHeadP >> return ())
    let expSign = if hasESign then "-" else ""
    -- Get digits in the exponent
    digits3 <- satisfy Char.isDigit
    -- As long as the double had digits
    if not (null digits1)
      then returnClean (read (sign ++digits1++dec++digits2++exp++expSign++digits3))
      else returnError def (E.FoundWhenExpecting (mkStr c) "Double")

instance Pads Double Base_md where
  parsePP = double_parseM
  printFL = double_printFL

-- | Print a Double via its Show instance.
double_printFL :: (Double, Base_md) -> FList
double_printFL (d, bmd) = fshow d
-----------------------------------------------------------------
-- | Try: speculative parsing — parse without committing to consumed
-- input (delegates to 'parseTry'); printing is the identity wrapper.
type Try a = a
type Try_md a_md = (Base_md, a_md)

try_parseM p = parseTry p
try_printFL p = printFL p
-----------------------------------------------------------------
type Digit = Int
type Digit_md = Base_md

-- | Parse a single decimal digit character into its Int value.
-- NOTE(review): the EOF/EOR labels say "Pdigit" while the mismatch
-- error says "Digit" — looks like a leftover legacy name.
digit_parseM :: PadsParser (Digit, Base_md)
digit_parseM =
  handleEOF def "Pdigit" $
  handleEOR def "Pdigit" $ do
    c <- takeHeadP
    if isDigit c
      then returnClean (digitToInt c)
      else returnError def (E.FoundWhenExpecting [c] "Digit")

-- | Print a digit via its Show instance.
digit_printFL :: (Digit, Base_md) -> FList
digit_printFL (i, bmd) = fshow i
-----------------------------------------------------------------
-- | Text: the remainder of the input, taken verbatim as a raw stream.
newtype Text = Text S.RawStream
  deriving (Eq, Show, Data, Typeable, Ord)
type Text_md = Base_md

-- | Consume all remaining input as Text; never fails.
text_parseM :: PadsParser (Text, Base_md)
text_parseM = do
  document <- getAllBinP
  returnClean (Text document)

instance Pretty Text where
  ppr (Text str) = text "ASCII"

instance Pads Text Base_md where
  parsePP = text_parseM
  printFL = text_printFL

-- | Print the captured raw stream unchanged.
text_printFL :: (Text, Base_md) -> FList
text_printFL (Text str, bmd) = addBString str
-----------------------------------------------------------------
-- | Binary: the remainder of the input as an uninterpreted byte stream.
newtype Binary = Binary S.RawStream
  deriving (Eq, Show, Data, Typeable, Ord)
type Binary_md = Base_md

-- | Consume all remaining input as Binary; never fails.
binary_parseM :: PadsParser (Binary, Base_md)
binary_parseM = do
  document <- getAllBinP
  returnClean (Binary document)

instance Pretty Binary where
  ppr (Binary str) = text "Binary"

instance Pads Binary Base_md where
  parsePP = binary_parseM
  printFL = binary_printFL

-- | Print the captured byte stream unchanged.
binary_printFL :: (Binary, Base_md) -> FList
binary_printFL (Binary bstr, bmd) = addBString bstr
-----------------------------------------------------------------
type StringC = String
type StringC_md = Base_md

-- | Parse a string up to (but not consuming) the terminator character c.
stringC_parseM :: Char -> PadsParser (StringC, Base_md)
stringC_parseM c =
  handleEOF (stringC_def c) "StringC" $
  handleEOR (stringC_def c) "StringC" $ do
    str <- satisfy (\c'-> c /= c')
    returnClean str

-- | Default StringC value; the terminator argument is ignored.
stringC_def c = ""

-- | Print the string; the terminator itself is not emitted.
stringC_printFL :: Char -> (StringC, Base_md) -> FList
stringC_printFL c (str, bmd) = addString str
-----------------------------------------------------------------
type StringFW = String
type StringFW_md = Base_md

-- | Parse a fixed-width string of exactly @n@ characters.  If fewer
-- than @n@ characters are available, an Insufficient error is raised
-- with the padded default as the value.
stringFW_parseM :: Int -> PadsParser (StringFW, Base_md)
stringFW_parseM 0 = returnClean ""
stringFW_parseM n =
  handleEOF (stringFW_def n) "StringFW" $
  handleEOR (stringFW_def n) "StringFW" $ do
    str <- takeP n
    if length str == n
      then returnClean str
      else returnError (stringFW_def n) (E.Insufficient (length str) n)

-- | Default fixed-width value: @n@ placeholder 'X' characters.
-- (Idiom fix: 'replicate' instead of @take n (repeat 'X')@.)
stringFW_def :: Int -> StringFW
stringFW_def n = replicate n 'X'

-- | Print at most @n@ characters; no padding is added for short input.
stringFW_printFL :: Int -> (StringFW, Base_md) -> FList
stringFW_printFL n (str, bmd) = addString (take n str)
-----------------------------------------------------------------
type StringME = String
type StringME_md = Base_md

-- | Parse a string that matches the given regular expression
-- ("Matching Expression"); failure to match is a RegexMatchFail error.
stringME_parseM :: RE -> PadsParser (StringME, Base_md)
stringME_parseM re =
  handleEOF (stringME_def re) "StringME" $ do
    match <- regexMatchP re
    case match of
      Just str -> returnClean str
      Nothing  -> returnError (stringME_def re) (E.RegexMatchFail (show re))

stringME_def (RE re) = "" -- should invert the re
stringME_def (REd re d) = d

-- | Print the matched string back out.
stringME_printFL :: RE -> (StringME, Base_md) -> FList
stringME_printFL re (str, bmd) = addString str
-- We're not likely to check that str matches re
-----------------------------------------------------------------
type StringSE = String
type StringSE_md = Base_md

-- | Parse a string up to (not including) a match of the given regular
-- expression ("Stopping Expression").  Unlike StringME, EOF/EOR here
-- terminate cleanly (checkEOF/checkEOR) rather than erroring.
stringSE_parseM :: RE -> PadsParser (StringSE, Base_md)
stringSE_parseM re =
  checkEOF (stringSE_def re) "StringSE" $
  checkEOR (stringSE_def re) "StringSE" $ do
    match <- regexStopP re
    case match of
      Just str -> returnClean str
      Nothing  -> returnError (stringSE_def re) (E.RegexMatchFail (show re))

stringSE_def (RE re) = "" -- should invert the re
stringSE_def (REd re d) = d

-- | Print the string back out (the first parameter is unused).
stringSE_printFL :: RE -> (StringSE, Base_md) -> FList
stringSE_printFL s (str, bmd) = addString str
-----------------------------------------------------------------
type StringP = String
type StringP_md = Base_md

-- | Parse the longest prefix of characters satisfying the predicate.
stringP_parseM :: (Char -> Bool) -> PadsParser (StringP, Base_md)
stringP_parseM p =
  handleEOF (stringP_def p) "StringP" $
  handleEOR (stringP_def p) "StringP" $ do
    str <- satisfy p
    returnClean str

-- | Default StringP value; the predicate is ignored.
stringP_def _ = ""

-- | Print the string; the predicate is not re-checked.
stringP_printFL :: (Char -> Bool) -> (StringP, Base_md) -> FList
stringP_printFL p (str, bmd) = addString str
-----------------------------------------------------------------
type StringPESC = String
type StringPESC_md = Base_md

-- | Parse a string terminated by any of the stop characters, where the
-- escape character makes the following escape/stop character literal.
-- The Bool says whether EOR/EOF end the string cleanly (True:
-- checkEOF/checkEOR) or are an error (False: handleEOF/handleEOR).
-- The terminating stop character is left unconsumed.
stringPESC_parseM :: (Bool, (Char, [Char])) -> PadsParser(StringPESC, Base_md)
stringPESC_parseM arg @ (endIfEOR, (escape, stops)) =
 let (doEOF, doEOR) = if endIfEOR then (checkEOF, checkEOR) else (handleEOF, handleEOR)
 in
  doEOF "" "StringPESC" $
  doEOR "" "StringPESC" $ do
    { c1 <- peekHeadP
    ; if c1 `elem` stops then
        -- Unescaped stop character: end of string.
        returnClean ""
      else if c1 == escape then do
        { takeHeadP
        ; doEOF [c1] "StringPESC" $
          doEOR [c1] "StringPESC" $ do
           { c2 <- takeHeadP
           ; if (c2 == escape) || (c2 `elem` stops) then do
                -- Escaped special character: keep c2, drop the escape.
                { (rest, rest_md) <- stringPESC_parseM arg
                ; return (c2:rest, rest_md)
                }
             else do
                -- Escape before an ordinary character: keep both.
                { (rest, rest_md) <- stringPESC_parseM arg
                ; return (c1:c2:rest, rest_md)
                }
           }
        } else do
           -- Ordinary character (c1 here shadows the peeked c1 with
           -- the same value, now actually consumed).
           { c1 <- takeHeadP
           ; (rest, rest_md) <- stringPESC_parseM arg
           ; return (c1:rest, rest_md)
           }
    }
-- | Print a StringPESC, inserting escapes so that the output re-parses
-- to the original string.
--
-- Bug fix: the escape character itself must also be escaped.  The
-- original escaped only the stop characters, so a string such as
-- [escape, stop] printed to text that 'stringPESC_parseM' reads back
-- as a different string (the round-trip failed).
stringPESC_printFL :: (Bool, (Char, [Char])) -> (StringPESC, Base_md) -> FList
stringPESC_printFL (_, (escape, stops)) (str, bmd) =
  let replace c
        | c == escape || c `elem` stops = [escape, c]
        | otherwise                     = [c]
      newStr = concatMap replace str
  in addString newStr
-----------------------------------------------------------------
-- | Class of literal syntax elements that can be parsed (discarding
-- the matched text) and printed back out.
class LitParse a where
  litParse :: a -> PadsParser ((), Base_md)
  litPrint :: a -> FList
-- | Parse an exact string literal.  'scanStrP' returns the characters
-- skipped before the match: an empty prefix is a clean parse, a
-- non-empty prefix means extra input preceded the literal, and Nothing
-- means the literal was not found at all.
strLit_parseM :: String -> PadsParser ((), Base_md)
strLit_parseM s =
  handleEOF () s $
  handleEOR () s $ do
    match <- scanStrP s
    case match of
      Just []   -> returnClean ()
      Just junk -> returnError () (E.ExtraBeforeLiteral s)
      Nothing   -> returnError () (E.MissingLiteral s)
instance LitParse Char where
  litParse = charLit_parseM
  litPrint = charLit_printFL

-- | Parse an exact character literal.  On mismatch, scan ahead for the
-- character to distinguish "extra input before the literal" from "the
-- literal is missing entirely".
charLit_parseM :: Char -> PadsParser ((), Base_md)
charLit_parseM c =
  handleEOF () (mkStr c) $
  handleEOR () (mkStr c) $ do
    c' <- takeHeadP
    if c == c' then returnClean () else do
      foundIt <- scanP c
      returnError () (if foundIt
                      then E.ExtraBeforeLiteral (mkStr c)
                      else E.MissingLiteral (mkStr c))
instance LitParse String where
  litParse = strLit_parseM
  litPrint = strLit_printFL

instance LitParse RE where
  litParse = reLit_parseM
  litPrint = reLit_printFL

-- | Parse a regular-expression literal: match via 'stringME_parseM',
-- discard the matched text, and propagate the metadata (using
-- 'badReturn' when the match recorded errors).
reLit_parseM :: RE -> PadsParser ((), Base_md)
reLit_parseM re = do
  (match, md) <- stringME_parseM re
  if numErrors md == 0
    then return ((), md)
    else badReturn ((), md)
type EOF_md = Base_md

-- | Succeed exactly at end-of-file; otherwise report leftover input.
eof_parseM :: PadsParser ((), Base_md)
eof_parseM = do
  isEof <- isEOFP
  if isEof then returnClean ()
           else returnError () (E.ExtraBeforeLiteral "Eof")

type EOR_md = Base_md

-- | Consume an end-of-record marker ('doLineEnd'); anything else is a
-- line error.  EOF before the EOR is reported via 'handleEOF'.
eor_parseM :: PadsParser ((), Base_md)
eor_parseM =
  handleEOF () "EOR" $ do
    isEor <- isEORP
    if isEor then doLineEnd
             else returnError () (E.LineError "Expecting EOR")
-- | Print a regex literal.  A plain RE has no canonical text, so a
-- placeholder marker is emitted; REd carries an explicit default.
reLit_printFL :: RE -> FList
reLit_printFL (RE re) = addString "--REGEXP LITERAL-- "
reLit_printFL (REd re def) = addString def

-- | Print a character literal verbatim.
charLit_printFL :: Char -> FList
charLit_printFL c = addString [c]

-- | Print a string literal verbatim.
strLit_printFL :: String -> FList
strLit_printFL str = addString str

-- | Print an end-of-record marker.
eorLit_printFL :: FList
eorLit_printFL = printEOR

-- | Print an end-of-file marker.
eofLit_printFL :: FList
eofLit_printFL = printEOF
-----------------------------------------------------------------
-- | Void: a unit type that consumes no input and prints nothing.
newtype Void = Void ()
  deriving (Eq, Show, Data, Typeable, Ord)
type Void_md = Base_md

-- | Always succeeds without consuming input.
void_parseM :: PadsParser (Void, Base_md)
void_parseM = returnClean (Void ())

instance Pads Void Base_md where
  parsePP = void_parseM
  printFL = void_printFL

-- | Emits nothing (works for any argument type).
void_printFL :: a -> FList
void_printFL v = nil
-- | Emit a string (quasi-quote printing helper).
pstrLit_printQ :: String -> FList
pstrLit_printQ = addString

-- | Print the three components of a triple back to back.
tuple_printQ :: (String, String, String) -> FList
tuple_printQ (a, b, c) =
  pstrLit_printQ a +++ pstrLit_printQ b +++ pstrLit_printQ c

-- | Like 'tuple_printQ', but terminated with a newline (one record).
rtuple_printQ :: (String, String, String) -> FList
rtuple_printQ triple = tuple_printQ triple +++ addString "\n"

-- | Print a list of triples, one record per line.
list_printQ :: [(String,String,String)] -> FList
list_printQ = foldr (\triple rest -> rtuple_printQ triple +++ rest) nil
----------------------------------
-- | Run p unless the input is at end-of-file; at EOF, fail with a
-- FoundWhenExpecting error carrying the default value val.
handleEOF val str p
  = do { isEof <- isEOFP
       ; if isEof then
           returnError val (E.FoundWhenExpecting "EOF" str)
         else p}

-- | As 'handleEOF', but guards against end-of-record.
handleEOR val str p
  = do { isEor <- isEORP
       ; if isEor then
           returnError val (E.FoundWhenExpecting "EOR" str)
         else p}

-- | Run p unless at end-of-file; at EOF succeed cleanly with val.
-- (str is unused here; kept for symmetry with 'handleEOF'.)
checkEOF val str p
  = do { isEof <- isEOFP
       ; if isEof then
           returnClean val
         else p}

-- | As 'checkEOF', but for end-of-record.
checkEOR val str p
  = do { isEor <- isEORP
       ; if isEor then
           returnClean val
         else p}
----------------------------------
-- BINARY TYPES --
----------------------------------

type Bytes = S.RawStream
type Bytes_md = Base_md

-- | Parse exactly @n@ raw bytes; fewer available bytes produce an
-- Insufficient error.
--
-- Consistency fix: use 'bytes_default' as the default value in every
-- error path (the original mixed @def1 n@ in the EOF/EOR guards with
-- @bytes_default n@ in the length-check branch).
bytes_parseM :: Int -> PadsParser (Bytes,Bytes_md)
bytes_parseM n =
  handleEOF (bytes_default n) "Bytes" $
  handleEOR (bytes_default n) "Bytes" $ do
    bytes <- takeBytesP n
    if B.length bytes == n
      then returnClean bytes
      else returnError (bytes_default n) (E.Insufficient (B.length bytes) n)
-- | Default value for 'Bytes': @n@ zero bytes.
-- (Uses 'B.replicate' directly, removing the dependence on the
-- non-standard @fromInt@ — @B.pack (replicate n 0)@ is equivalent.)
bytes_default :: Int -> Bytes
bytes_default n = B.replicate n 0
-- | Print raw bytes verbatim; the width argument is ignored.
bytes_printFL :: Int -> (Bytes, Bytes_md) -> FList
bytes_printFL i (bs, bmd) = addBString bs

instance Pads1 Int Bytes Bytes_md where
  parsePP1 = bytes_parseM
  printFL1 = bytes_printFL
---- All the other base types can be derived from this: moved to BaseTypes.hs

{- Helper functions -}

-- | Quote a character for error messages, e.g. @mkStr 'a' == "'a'"@.
mkStr :: Char -> String
mkStr c = ['\'', c, '\'']
| athleens/pads-haskell | Language/Pads/CoreBaseTypes.hs | bsd-3-clause | 13,896 | 1 | 24 | 3,154 | 4,145 | 2,197 | 1,948 | 336 | 6 |
{-# LANGUAGE NoImplicitPrelude #-}
import Control.Exception (onException)
import Control.Monad (void)
import Data.List (isSuffixOf)
import System.Environment (getArgs, getProgName)
import System.Process
import Prelude.Compat
-- | Drop a single trailing newline from a string, if present.
chomp :: String -> String
chomp s
  | not (null s) && last s == '\n' = init s
  | otherwise                      = s
-- | Run a git subcommand with the given arguments (empty stdin) and
-- return its standard output.  'readProcess' throws on a nonzero exit.
git :: String -> [String] -> IO String
git cmd args = readProcess "git" (cmd:args) ""
-- | Run a git subcommand, discarding its output.
git_ :: String -> [String] -> IO ()
git_ cmd args = void (git cmd args)
-- | Run @git rev-parse@ and return its output with the trailing
-- newline stripped.
revParse :: [String] -> IO String
revParse args = fmap chomp (git "rev-parse" args)
-- | Run @git reset@ with the given arguments, ignoring its output.
reset :: [String] -> IO ()
reset = git_ "reset"
-- | Run an action; if it throws, print the message and re-raise the
-- exception.
logErrors :: IO a -> String -> IO a
logErrors action msg = onException action (putStrLn msg)
-- | git-jump: move HEAD to another ref while carrying along both the
-- staged (index) and unstaged (working-tree) changes.
--
-- Strategy: snapshot the index as an empty STAGING commit and the
-- working tree as an UNSTAGED commit, hard-reset to the destination,
-- cherry-pick both snapshots back, then unwind the two commits so the
-- index and working tree look as before.  On any failure the original
-- position and state are restored.
main :: IO ()
main =
  do
    progName <- getProgName
    args <- getArgs
    let destRefSpec =
          case args of
            [] -> "@{u}"
            [refSpec] -> refSpec
            -- Bug fix: use 'error', not 'fail'.  In this pure String
            -- context 'fail' resolves to the list instance, which
            -- silently yields "" and discards the usage message.
            -- Also adds the missing space before "[refspec]".
            _ -> error $ "Usage: " ++ progName ++ " [refspec]\n if refspec is not provided, the remote tracked branch is used"
    destRef <- revParse [destRefSpec]
    origPos <- revParse ["HEAD"]
    git_ "commit" ["--allow-empty", "-mSTAGING"]    -- snapshot the index
    staging <- revParse ["HEAD"]
    git_ "commit" ["--allow-empty", "-amUNSTAGED"]  -- snapshot the working tree
    unstaged <- revParse ["HEAD"]
    let restore =
          do
            reset ["--hard", unstaged]
            reset ["--mixed", staging]
            reset ["--soft", origPos]
            return ()
    (`onException` restore) $
      do
        reset ["--hard", destRef]
          `logErrors` ("Failed to jump to " ++ show destRef)
        git_ "cherry-pick" ["--allow-empty", staging]
        git_ "cherry-pick" ["--allow-empty", unstaged]
        reset ["--mixed", "HEAD^"]  -- pop UNSTAGED back into the working tree
        reset ["--soft", "HEAD^"]   -- pop STAGING back into the index
        return ()
| Peaker/git-jump | git-jump.hs | bsd-3-clause | 1,968 | 0 | 15 | 687 | 581 | 294 | 287 | 52 | 3 |
-- |
-- Module : Crypto.Random
-- License : BSD-style
-- Maintainer : Vincent Hanquez <[email protected]>
-- Stability : stable
-- Portability : good
--
module Crypto.Random
(
-- * Deterministic instances
ChaChaDRG
, SystemDRG
-- * Deterministic Random class
, getSystemDRG
, drgNew
, drgNewTest
, withDRG
, withRandomBytes
, DRG(..)
-- * Random abstraction
, MonadRandom(..)
, MonadPseudoRandom
) where
import Crypto.Random.Types
import Crypto.Random.ChaChaDRG
import Crypto.Random.SystemDRG
import Data.ByteArray (ByteArray, ScrubbedBytes)
import Crypto.Internal.Imports
-- | Create a new DRG from system entropy.
--
-- Draws 40 fresh random bytes to seed the ChaCha generator; the seed
-- is held in 'ScrubbedBytes' so the buffer is scrubbed once freed.
drgNew :: MonadRandom randomly => randomly ChaChaDRG
drgNew = do
    b <- getRandomBytes 40
    return $ initialize (b :: ScrubbedBytes)
-- | Create a new DRG from 5 Word64.
--
-- This is a convenient interface to create a deterministic DRG for
-- QuickCheck-style testing.
--
-- It can also be used in other contexts provided the input
-- has been properly randomly generated.
drgNewTest :: (Word64, Word64, Word64, Word64, Word64) -> ChaChaDRG
drgNewTest = initializeWords
-- | Generate @len@ random bytes and apply the function @f@ to them.
--
-- Returns @f@'s result paired with the advanced generator state
-- (equivalent to Control.Arrow 'first' over 'randomBytesGenerate').
withRandomBytes :: (ByteArray ba, DRG g) => g -> Int -> (ba -> a) -> (a, g)
withRandomBytes gen len f =
    let (bytes, gen') = randomBytesGenerate len gen
    in (f bytes, gen')
| nomeata/cryptonite | Crypto/Random.hs | bsd-3-clause | 1,492 | 0 | 10 | 304 | 264 | 162 | 102 | 26 | 1 |
--
-- >>> Main (prep) <<<
--
-- This program 'Haskelises' the hub help text.
--
-- (c) 2011-2012 Chris Dornan
module Main(main) where
import System.Locale
import Data.Time
import Text.Printf
-- | Read the plain-text help file and regenerate the Haskell module
-- that embeds it as the 'helpText' constant.
main :: IO ()
main =
  do cts <- readFile "help.txt"
     writeFile "Hub/HelpText.hs" $ mk_text_mod "Hub.HelpText" "helpText" cts
-- | Render a Haskell module source that exports the given text as a
-- string constant: @mk_text_mod moduleName fnName contents@ produces a
-- module @moduleName@ exporting @fnName :: String@, built with
-- 'unlines' over the quoted lines of @contents@.  An empty input file
-- is an error.
mk_text_mod :: String -> String -> String -> String
mk_text_mod moduleName fnName contents =
  case lines contents of
    [] -> error "that is strange, the text file is empty"
    (firstLn : restLns) ->
      unlines (header firstLn ++ map row restLns ++ footer)
  where
    header ln =
      [ "module " ++ moduleName ++ "(" ++ fnName ++ ") where"
      , ""
      , fnName ++ " :: String"
      , fnName ++ " = unlines"
      , "  [ " ++ show ln
      ]
    row ln = "  , " ++ show ln
    footer = ["  ]"]
| Lainepress/hub-src | prep.hs | bsd-3-clause | 979 | 0 | 10 | 412 | 234 | 120 | 114 | 21 | 2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.