code
stringlengths 5
1.03M
| repo_name
stringlengths 5
90
| path
stringlengths 4
158
| license
stringclasses 15
values | size
int64 5
1.03M
| n_ast_errors
int64 0
53.9k
| ast_max_depth
int64 2
4.17k
| n_whitespaces
int64 0
365k
| n_ast_nodes
int64 3
317k
| n_ast_terminals
int64 1
171k
| n_ast_nonterminals
int64 1
146k
| loc
int64 -1
37.3k
| cycloplexity
int64 -1
1.31k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}
--------------------------------------------------------------------------------
-- See end of this file for licence information.
--------------------------------------------------------------------------------
-- |
-- Module : Namespace
-- Copyright : (c) 2003, Graham Klyne, 2009 Vasili I Galchin,
-- 2011, 2012, 2014 Douglas Burke
-- License : GPL V2
--
-- Maintainer : Douglas Burke
-- Stability : experimental
-- Portability : CPP, OverloadedStrings
--
-- This module defines algebraic datatypes for namespaces and scoped names.
--
-- For these purposes, a namespace is a prefix and URI used to identify
-- a namespace (cf. XML namespaces), and a scoped name is a name that
-- is scoped by a specified namespace.
--
--------------------------------------------------------------------------------
module Swish.Namespace
( Namespace
, makeNamespace, makeNamespaceQName
, getNamespacePrefix, getNamespaceURI, getNamespaceTuple
-- , nullNamespace
, ScopedName
, getScopeNamespace, getScopeLocal
, getScopePrefix, getScopeURI
, getQName, getScopedNameURI
, matchName
, makeScopedName
, makeQNameScopedName
, makeURIScopedName
, makeNSScopedName
, nullScopedName
, namespaceToBuilder
)
where
import Swish.QName (QName, LName, newQName, getLName, emptyLName, getQNameURI, getNamespace, getLocalName)
import Data.Maybe (fromMaybe)
#if (!defined(__GLASGOW_HASKELL__)) || (__GLASGOW_HASKELL__ < 710)
import Data.Monoid (Monoid(..))
#endif
import Data.Ord (comparing)
import Data.String (IsString(..))
import Network.URI (URI(..), parseURIReference, nullURI)
import qualified Data.Text as T
import qualified Data.Text.Lazy.Builder as B
------------------------------------------------------------
-- Namespace, having a prefix and a URI
------------------------------------------------------------
-- |A NameSpace value consists of an optional prefix and a corresponding URI.
--
-- Note: the 'Eq' instance defined below compares only the URI component,
-- so two namespaces with the same URI but different prefixes are equal.
data Namespace = Namespace (Maybe T.Text) URI
-- data Namespace = Namespace (Maybe T.Text) !URI
-- TODO: look at interning the URI
-- | Returns the prefix stored in the name space, if one was supplied.
getNamespacePrefix :: Namespace -> Maybe T.Text
getNamespacePrefix ns =
  case ns of
    Namespace prefix _uri -> prefix
-- | Returns the URI stored in the name space.
getNamespaceURI :: Namespace -> URI
getNamespaceURI ns =
  case ns of
    Namespace _prefix uri -> uri
-- | Convert the name space to a (prefix, URI) tuple.
getNamespaceTuple :: Namespace -> (Maybe T.Text, URI)
getNamespaceTuple ns = (getNamespacePrefix ns, getNamespaceURI ns)
-- | Equality is defined by the URI, not by the prefix
-- (so the same URI with different prefixes will be
-- considered to be equal).
instance Eq Namespace where
  n1 == n2 = getNamespaceURI n1 == getNamespaceURI n2
instance Ord Namespace where
  -- using show for the URI is wasteful
  compare = comparing key
    where
      key (Namespace p u) = (p, show u)
instance Show Namespace where
  show (Namespace mPre u) =
    let bracketed = "<" ++ show u ++ ">"
    in case mPre of
         Just p  -> show p ++ ":" ++ bracketed
         Nothing -> bracketed
-- | Create a name space from a URI and an optional prefix label.
makeNamespace ::
  Maybe T.Text -- ^ optional prefix.
  -> URI -- ^ URI.
  -> Namespace
makeNamespace mPre uri = Namespace mPre uri
-- | Create a qualified name by combining the URI from
-- the name space with a local component.
makeNamespaceQName ::
  Namespace -- ^ The name space URI is used in the qualified name
  -> LName -- ^ local component of the qualified name (can be 'emptyLName')
  -> QName
makeNamespaceQName ns local = newQName (getNamespaceURI ns) local
{-
nullNamespace :: Namespace
nullNamespace = Namespace Nothing ""
-}
-- | Utility routine to create a \@prefix line (matching N3/Turtle)
-- grammar for this namespace.
--
namespaceToBuilder :: Namespace -> B.Builder
namespaceToBuilder (Namespace mPre uri) =
  mconcat
    [ B.fromText "@prefix "
    , B.fromText (fromMaybe "" mPre)
    , B.fromText ": <"
    , B.fromText (T.pack (show uri))
    , B.fromText "> .\n"
    ]
------------------------------------------------------------
-- ScopedName, made from a namespace and a local name
------------------------------------------------------------
-- | A full ScopedName value has a QName prefix, namespace URI
-- and a local part. ScopedName values may omit the prefix
-- (see 'Namespace') or the local part.
--
-- Some applications may handle null namespace URIs as meaning
-- the local part is relative to some base URI.
--
-- The strict 'QName' field caches the combination of the namespace URI
-- and the local name (see how 'makeScopedName' constructs it), so that
-- the 'Eq'/'Ord' instances can compare on it directly.
data ScopedName = ScopedName !QName Namespace LName
-- | Returns the local part.
getScopeLocal :: ScopedName -> LName
getScopeLocal sn =
  case sn of
    ScopedName _qn _ns local -> local
-- | Returns the namespace.
getScopeNamespace :: ScopedName -> Namespace
getScopeNamespace sn =
  case sn of
    ScopedName _qn ns _local -> ns
-- | Returns the prefix of the namespace, if set.
getScopePrefix :: ScopedName -> Maybe T.Text
getScopePrefix sn = getNamespacePrefix (getScopeNamespace sn)
-- | Returns the URI of the namespace.
getScopeURI :: ScopedName -> URI
getScopeURI sn = getNamespaceURI (getScopeNamespace sn)
-- | This is not total since it will fail if the input string is not a valid 'URI'.
instance IsString ScopedName where
  fromString s =
    case parseURIReference s of
      Just u  -> makeURIScopedName u
      Nothing -> error ("Unable to convert " ++ s ++ " into a ScopedName")
-- | Scoped names are equal if their corresponding 'QName' values are equal.
instance Eq ScopedName where
  a == b = getQName a == getQName b
-- | Scoped names are ordered by their 'QName' components.
instance Ord ScopedName where
  compare sn1 sn2 = compare (getQName sn1) (getQName sn2)
-- | If there is a namespace associated then the Show instance
-- uses @prefix:local@, otherwise @<url>@.
instance Show ScopedName where
  show (ScopedName qn ns l) =
    case getNamespacePrefix ns of
      Just pre -> T.unpack (mconcat [pre, ":", getLName l])
      Nothing  -> show qn
-- |Get the QName corresponding to a scoped name.
getQName :: ScopedName -> QName
getQName sn =
  case sn of
    ScopedName qn _ns _local -> qn
-- |Get URI corresponding to a scoped name (using RDF conventions).
getScopedNameURI :: ScopedName -> URI
getScopedNameURI sn = getQNameURI (getQName sn)
-- |Test if supplied string matches the display form of a
-- scoped name.
matchName :: String -> ScopedName -> Bool
matchName str nam = show nam == str
-- |Construct a ScopedName, caching the combined 'QName'.
makeScopedName ::
  Maybe T.Text -- ^ prefix for the namespace
  -> URI -- ^ namespace
  -> LName -- ^ local name
  -> ScopedName
makeScopedName pre nsuri local =
  let qn = newQName nsuri local
      ns = Namespace pre nsuri
  in ScopedName qn ns local
-- |Construct a ScopedName from a QName.
makeQNameScopedName ::
  Maybe T.Text -- ^ prefix
  -> QName
  -> ScopedName
makeQNameScopedName pre qn =
  let ns = Namespace pre (getNamespace qn)
  in ScopedName qn ns (getLocalName qn)
-- could use qnameFromURI to find a local name if there is one.
-- | Construct a ScopedName for a bare URI (the label is set to \"\").
makeURIScopedName :: URI -> ScopedName
makeURIScopedName uri = makeNSScopedName (makeNamespace Nothing uri) emptyLName
-- | Construct a ScopedName from a namespace and a local component.
makeNSScopedName ::
  Namespace -- ^ namespace
  -> LName -- ^ local component
  -> ScopedName
makeNSScopedName ns local = ScopedName qn ns local
  where
    qn = newQName (getNamespaceURI ns) local
-- | This should never appear as a valid name
nullScopedName :: ScopedName
nullScopedName = makeScopedName Nothing nullURI emptyLName
--------------------------------------------------------------------------------
--
-- Copyright (c) 2003, Graham Klyne, 2009 Vasili I Galchin,
-- 2011, 2012 Douglas Burke
-- All rights reserved.
--
-- This file is part of Swish.
--
-- Swish is free software; you can redistribute it and/or modify
-- it under the terms of the GNU General Public License as published by
-- the Free Software Foundation; either version 2 of the License, or
-- (at your option) any later version.
--
-- Swish is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU General Public License for more details.
--
-- You should have received a copy of the GNU General Public License
-- along with Swish; if not, write to:
-- The Free Software Foundation, Inc.,
-- 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
--
--------------------------------------------------------------------------------
| DougBurke/swish | src/Swish/Namespace.hs | lgpl-2.1 | 8,529 | 0 | 12 | 1,685 | 1,250 | 716 | 534 | 107 | 1 |
-- Utilities for surface manipulation
module Cane.Image where
import Data.List
import Foreign
import Control.Monad
import Graphics.UI.SDL as SDL hiding (Color)
import Glove.Types
-- | Split a 'Color' into its (red, green, blue) components,
-- discarding the alpha channel.
colorToRGB :: Color -> (Word8, Word8, Word8)
colorToRGB (Color r g b _) = (toW8 r, toW8 g, toW8 b)
  where
    toW8 v = fromIntegral v
-- | Pack the red, green and blue channels of a 'Color' into a single
-- 'Pixel' (red in the highest of the three bytes); alpha is ignored.
toPixel :: Color -> Pixel
toPixel (Color r g b _) =
    Pixel $ foldl' (\acc chan -> (acc `shiftL` 8) .|. fromIntegral chan) 0 [r, g, b]
-- |Read the pixel at (x,y) on s.
-- If it is white, replace it on the surface s'
-- by the color of pixel p. If not, do nothing.
pixToR :: Surface -> Surface -> Pixel -> (Int, Int) -> IO ()
pixToR src dst p (x, y) = do
    pix <- getPixel src x y
    when (isWhite pix) $
        setPixel dst x y p
-- | True exactly when the pixel value is pure white (0x00ffffff).
isWhite :: Pixel -> Bool
isWhite (Pixel v) = v == 0x00ffffff
-- | Read the pixel value at (x,y) from the surface's raw pixel buffer.
--
-- NOTE(review): the buffer is indexed as machine words using the surface
-- width as the row stride -- this assumes a 32-bit pixel format with no
-- row padding (pitch == width * 4); confirm for the surfaces in use.
-- No bounds checking is performed.
getPixel :: Surface -> Int -> Int -> IO Pixel
getPixel s x y = do pixels <- castPtr `liftM` surfaceGetPixels s
                    Pixel `liftM` peekElemOff pixels ((y * surfaceGetWidth s) + x)
-- | Overwrite the pixel value at (x,y) on the surface.
--
-- NOTE(review): same layout assumptions as 'getPixel' (32-bit pixels,
-- row stride == surface width, no padding); no bounds checking.
setPixel :: Surface -> Int -> Int -> Pixel -> IO ()
setPixel s x y (Pixel p) =
  do pixels <- castPtr `liftM` surfaceGetPixels s
     pokeElemOff pixels ((y * surfaceGetWidth s) + x) p
-- | Create an all-black 32-bit surface with colour-key transparency
-- enabled on black.
prepareSurface' :: Width -> Height -> IO Surface
prepareSurface' w h = do
    surf <- prepareSurface w h 0x00 0x00 0x00 0x00
    transparencyOn 0x00 0x00 0x00 surf
-- | Create a 32-bit software surface with the given channel masks and
-- convert it to the display's pixel format.
prepareSurface :: Width -> Height -> Word32 -> Word32 -> Word32 -> Word32 -> IO Surface
prepareSurface w h r g b a =
    displayFormat =<< createRGBSurface [SDL.SWSurface] w h 32 r g b a
-- | Enable colour-key transparency on the surface for the given RGB
-- colour, returning the same surface for convenient chaining.
transparencyOn :: Word8 -> Word8 -> Word8 -> Surface -> IO Surface
transparencyOn r g b s = do
    key <- mapRGB (surfaceGetPixelFormat s) r g b
    _ <- setColorKey s [SrcColorKey] key
    return s
| Raveline/Cane | lib/Cane/Image.hs | lgpl-3.0 | 1,774 | 0 | 13 | 435 | 664 | 343 | 321 | 36 | 1 |
--{-# LANGUAGE QuasiQuotes #-}
module HC.Use where
--import Language.LBNF.Runtime (printTree)
--import Prelude hiding (exp)
--import HC.Def
| haroldcarr/learn-haskell-coq-ml-etc | haskell/topic/parsing/bnfc-meta-examples/src/HC/Use.hs | unlicense | 187 | 0 | 3 | 63 | 10 | 8 | 2 | 1 | 0 |
{-
Copyright 2015 Tristan Aubrey-Jones
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-}
import Compiler.Front.Common
import Compiler.Front.Indices (IdxMonad)
import Compiler.Front.Front
import Compiler.Front.ExprTree
import Compiler.Front.SrcLexer
import Compiler.Front.SrcParser
import Compiler.Front.Preprocess
import Compiler.Types.Types
import Compiler.Types.TypeAssignment (assignTypes, showExprWithTypes)
import Compiler.Back.FromFront
import Compiler.Back.GraphBuilder
import Compiler.Back.ShowGraph (dotDraw)
import Data.List (isSuffixOf)
import System.Directory
import Control.Monad.State.Strict (lift)
-- | End-to-end test driver: loads type definitions and the test program
-- from @Compiler/Tests/GraphBuild/@ (relative to the current working
-- directory), preprocesses and type-checks the program, translates the
-- type environment to the backend representation, and prints the
-- generated data-flow graph in dot format.
main2 :: IdxMonad IO ()
main2 = do
  lift $ putStr "DFG generation:\n"
  let relDir = "/Compiler/Tests/GraphBuild/"
  -- load types
  curDir <- lift $ getCurrentDirectory
  let typesPath = curDir ++ relDir ++ "lib1.types"
  lift $ putStr $ "Load type defs from: " ++ typesPath ++ "..."
  (varIds, typeDefs) <- loadTypes typesPath -- varIds maps var names to idxs, typeDefs maps var ids to type schemes
  lift $ putStr "Done.\n"
  -- load source file
  let testFile = curDir ++ relDir ++ "program1.flocc"
  ast <- parseSrcFile varIds testFile
  -- preprocess source
  lift $ putStr "Preprocessing AST..."
  ast' <- preprocessExpr varIds ast
  lift $ putStr $ show ast'
  lift $ putStr "Success.\n"
  -- perform type assignment
  lift $ putStr "Inferring program types..."
  astTypes <- assignTypes typeDefs ast'
  lift $ putStr "Success.\n"
  --lift $ putStr $ showExprWithTypes astTypes ast
  -- translate into back end types
  lift $ putStr "Translating into backend types..."
  let astTypes' = translateTyEnv astTypes
  lift $ putStr $ show astTypes'
  lift $ putStr "Done.\n"
  -- generate data flow graph...
  lift $ putStr "Building DFG from AST..."
  (graph, graphTypes) <- graphFromExpr astTypes' ast'
  lift $ putStr $ dotDraw graph --show graph
  --lift $ putStr $ show graphTypes
  lift $ putStr "Done.\n"
  lift $ putStr "\n\n"
  return ()
-- | Entry point: run 'main2' with an initial index-supply counter of 0.
main :: IO ()
main = evalIdxStateT 0 main2 >> return ()
| flocc-net/flocc | v0.1/Compiler/Tests/GraphBuild/Run.hs | apache-2.0 | 2,518 | 0 | 11 | 432 | 492 | 246 | 246 | 47 | 1 |
-- | A custom build script for CUDA support.
--
-- Copyright (C) 2014 Braam Research, LLC.
module Main(main) where
import Control.Applicative
import Control.Monad
import Distribution.Simple
import Distribution.Simple.BuildPaths (exeExtension)
import Distribution.Simple.Configure (configure)
import Distribution.Simple.Setup
import Distribution.Simple.LocalBuildInfo
import Distribution.Simple.Program
import Distribution.Simple.Program.Find
import Distribution.Simple.Program.Types
import Distribution.Simple.Utils
import Distribution.System
import Distribution.PackageDescription as PD hiding (Flag)
import Distribution.Text
import Distribution.ParseUtils
import Distribution.Compat.ReadP
import Distribution.Verbosity
import Data.Maybe
import Data.Char
import Data.List ( intersect )
import Debug.Trace
import System.Directory hiding (exeExtension)
import System.Process
import System.FilePath
-- | Run cabal, ensuring that CUDA & CUPTI get found.
main :: IO ()
main = defaultMainWithHooks simpleUserHooks {
    -- NB: The following allows us to override NVCC location and
    -- options from the command line. The syntax is slightly
    -- non-obvious:
    --
    --  $ cabal configure -- --with-nvcc=[...] --nvcc-options=[...]
    --
    -- Note the extra "--".
    hookedPrograms = nvccProgram : hookedPrograms simpleUserHooks,
    confHook = cudaConfigHook,
    buildHook = cudaBuildHook,
    copyHook = cudaCopyHook
  }
-- | Description of the @nvcc@ program for Cabal's program database,
-- including how to extract its version number from @nvcc --version@.
nvccProgram :: Program
nvccProgram = (simpleProgram "nvcc")
              { programFindVersion = findProgramVersion "--version" verSel }
  where verSel "" = ""
        -- Take the last word of the last output line and drop its first
        -- character (the leading "V" of e.g. "V7.5.17").
        -- NOTE(review): 'tail'/'last' are partial -- an unexpected banner
        -- format will crash rather than fail gracefully.
        verSel output = tail $ last $ words $ last $ lines output
-- | Configure hook: if @nvcc@ can be found on the default program search
-- path, extend the configure flags with extra lib\/include directories
-- derived from nvcc's location (two directories up, then @lib64@ \/
-- @lib@ \/ platform-specific on Windows), then defer to the standard
-- 'confHook'. If @nvcc@ is absent the flags are passed through unchanged.
cudaConfigHook :: (GenericPackageDescription, HookedBuildInfo) -> ConfigFlags ->
                  IO LocalBuildInfo
cudaConfigHook dat flags = do
  let verbose = fromFlagOrDefault normal $ configVerbosity flags
  m_nvccPath <- findProgramOnSearchPath verbose [ProgramSearchPathDefault] "nvcc"
  -- Irrespectively of whether there are actual CUDA sources or not,
  -- we want to make sure that CUDA & CUPTI libraries can actually get
  -- found by the linker. We simply assume that it sits at a certain
  -- path relative to NVCC - awkward, but that's what the "cuda"
  -- package does, so who am I to judge.
  let flags' nvcc = flags { configExtraLibDirs = configExtraLibDirs flags ++
                                                 [cudaLibDir]
                          , configExtraIncludeDirs = configExtraIncludeDirs flags ++
                                                     [cudaIncDir]
                          }
        where cudaBaseDir = takeDirectory $ takeDirectory nvcc
              cudaLibDir = cudaBaseDir </> libDir
              cudaIncDir = cudaBaseDir </> "include"
              libDir = case buildPlatform of
                Platform X86_64 Windows -> "lib" </> "x64"
                Platform I386 Windows -> "lib" </> "Win32"
                Platform X86_64 _ -> "lib64"
                Platform _ _ -> "lib"
  case m_nvccPath of
    Just nvcc -> putStrLn ("Found CUDA in " ++ takeDirectory (takeDirectory nvcc)) >>
                 confHook simpleUserHooks dat (flags' nvcc)
    Nothing -> confHook simpleUserHooks dat flags
-- | Process CUDA\/Halide sources for the library and all buildable
-- executables of the package. When the first argument is 'True' the
-- sources are actually compiled; when 'False' only the updated
-- 'PackageDescription' and the list of produced @.cubin@ files are
-- computed (used by the copy hook). The returned package description
-- has its build infos patched by 'cudaBuildInfo'.
buildCuda :: Bool -> PackageDescription -> LocalBuildInfo -> Verbosity
          -> IO (PackageDescription, [FilePath])
buildCuda doBuild package lbi verbose = do
  -- Find all CUDA sources in libraries & executables. Update
  -- build information accordingly.
  (library', lib_cubins) <- case library package of
    Just lib -> do (bi', cubins) <- cudaBuildInfo doBuild lbi verbose
                                      (buildDir lbi) "" (libBuildInfo lib)
                   return (Just lib { libBuildInfo = bi' }, cubins)
    Nothing -> return (Nothing, [])
  -- Attempt to be smart about when to build CUDA sources for
  -- executables...
  let exesToBuild = map exeName (executables package) -- `intersect` buildArgs flags
      shouldBuild e = buildable (buildInfo e) &&
                      (null exesToBuild || exeName e `elem` exesToBuild)
  exe_cubinss <- forM (filter shouldBuild $ executables package) $ \exe -> do
    -- Build directory & real exe name, copied from Distribution/Simple/GHC.hs.
    -- Would be brilliant if there was a more direct way to get this...
    let dir = buildDir lbi </> exeName exe
        exeNameReal = exeName exe <.> (if takeExtension (exeName exe) /= ('.':exeExtension)
                                       then exeExtension
                                       else "")
    (bi', cubins) <- cudaBuildInfo doBuild lbi verbose dir exeNameReal (buildInfo exe)
    return (exe { buildInfo = bi' }, cubins)
  let (executables', cubinss) = unzip exe_cubinss
  -- Carry on, given our sneaky modificiations...
  return (package { library = library'
                  , executables = executables' },
          lib_cubins ++ concat cubinss)
-- | Compile the CUDA (@x-cuda-sources@, @x-cuda-sources-cubin@) and
-- Halide (@x-halide-sources@) sources declared in a 'BuildInfo',
-- returning the 'BuildInfo' extended with the generated object files in
-- its @ld-options@, plus the list of produced @.cubin@ files.
--
-- Bug fix: the intermediate @bi'@ (carrying the CUDA object files in
-- its @ldOptions@) was previously discarded -- both branches of the
-- Halide step restarted from the original @bi@, so CUDA object files
-- never reached the linker. The Halide step now builds on @bi'@.
cudaBuildInfo :: Bool -> LocalBuildInfo -> Verbosity -> FilePath -> FilePath -> BuildInfo
              -> IO (BuildInfo, [FilePath])
cudaBuildInfo doBuild lbi verbose buildDir nameReal bi = do
  -- Get CUDA command line options
  let parseOpt rp = map fst . filter (all isSpace . snd) . readP_to_S rp
      cudaOptLine = fromMaybe "" $ lookup "x-cuda-options" (customFieldsBI bi)
      cudaOpts = concat $ parseOpt (sepBy parseTokenQ' (munch1 isSpace)) cudaOptLine
  -- Prepare for building
  (gcc,_) <- requireProgram verbose gccProgram (withPrograms lbi)
  let mkOutput ext = (buildDir </>) . flip replaceExtension ext . takeFileName
  when doBuild $ createDirectoryIfMissingVerbose verbose True buildDir
  -- Rebuild check: rebuild when the source or the .cabal file is newer
  -- than the existing output.
  let checkRebuild src out io = do
        srcMoreRecent <- moreRecentFile src out
        cabalMoreRecent <- maybe (return False) (flip moreRecentFile out) (pkgDescrFile lbi)
        when (srcMoreRecent || cabalMoreRecent) io
  -- Force rebuilding the library/executable by deleting it
  let invalidate = do
        let path = buildDir </> nameReal
        exists <- doesFileExist path
        when exists $ removeFile path
  -- Build CUBINs
  cubins <- case lookup "x-cuda-sources-cubin" (customFieldsBI bi) of
    Nothing -> return []
    Just cudaSrcLine -> do
      (nvcc,_) <- requireProgram verbose nvccProgram (withPrograms lbi)
      let parses = parseOpt (parseOptCommaList parseFilePathQ) cudaSrcLine
          cudaSources = head parses
      when (null parses) $ die "Failed to parse x-cuda-sources-cubin field."
      let outputFiles = map (mkOutput "cubin") cudaSources
      when doBuild $ forM_ (zip cudaSources outputFiles) $ \(src, out) ->
        checkRebuild src out $ do
          putStrLn $ "Building CUDA source " ++ src ++ "..."
          invalidate
          runProgram verbose nvcc (cudaOpts ++ ["--cubin", src, "-o", out])
      return outputFiles
  -- Build CUDA object files
  bi' <- case lookup "x-cuda-sources" (customFieldsBI bi) of
    Nothing -> return bi
    Just cudaSrcLine -> do
      (nvcc,_) <- requireProgram verbose nvccProgram (withPrograms lbi)
      let parses = parseOpt (parseOptCommaList parseFilePathQ) cudaSrcLine
          cudaSources = head parses
      when (null parses) $ die "Failed to parse x-cuda-sources field."
      let outputFiles = map (mkOutput "o") cudaSources
      when doBuild $ forM_ (zip cudaSources outputFiles) $ \(src, out) ->
        checkRebuild src out $ do
          putStrLn $ "Building CUDA source " ++ src ++ "..."
          invalidate
          runProgram verbose nvcc (cudaOpts ++ ["-c", src, "-o", out])
      -- Now for the hacky part: Get the linker to actually link
      -- this. I am 99% sure that this is the wrong way. In fact, it
      -- will fail to pick up the object file for ".a" libraries.
      return bi { ldOptions = ldOptions bi ++ outputFiles }
  -- Finally build Halide object files
  let halideOptLine = fromMaybe "" $ lookup "x-halide-options" (customFieldsBI bi)
      halideOpts = concat $ parseOpt (sepBy parseTokenQ' (munch1 isSpace)) halideOptLine
  bi'' <- case lookup "x-halide-sources" (customFieldsBI bi) of
    -- Was "return bi", which dropped the CUDA object ldOptions in bi'.
    Nothing -> return bi'
    Just cudaSrcLine -> do
      let parses = parseOpt (parseOptCommaList parseFilePathQ) cudaSrcLine
          halideSources = head parses
      when (null parses) $ die "Failed to parse x-halide-sources field."
      let genFiles = map (mkOutput "gen") halideSources
          outputFiles = map (mkOutput "kern.o") halideSources
      when doBuild $ forM_ (zip3 halideSources genFiles outputFiles) $ \(src, gen, out) ->
        checkRebuild src out $ do
          putStrLn $ "Building Halide source " ++ src ++ "..."
          invalidate
          -- Compile the Halide generator, then run it to emit the kernel
          -- object file.
          runProgram verbose gcc $ concat
            [ map ("-I"++) (PD.includeDirs bi)
            , map ("-L"++) (PD.extraLibDirs bi)
            , [src, "-o", gen]
            , halideOpts
            ]
          runProgramInvocation verbose $ simpleProgramInvocation gen [out]
      -- Yet again, hackily link the results in (building on bi' so the
      -- CUDA object files are kept as well).
      return bi' { ldOptions = ldOptions bi' ++ outputFiles }
  return (bi'', cubins)
-- | Build hook: compile CUDA/Halide sources first, then run the
-- standard build on the patched package description.
cudaBuildHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> BuildFlags -> IO ()
cudaBuildHook package lbi hooks flags = do
  let verbose = fromFlag (buildVerbosity flags)
  (package', _) <- buildCuda True package lbi verbose
  buildHook simpleUserHooks package' lbi hooks flags
-- | Copy hook: recompute the CUDA outputs (without rebuilding), install
-- the produced files into the package data directory, then run the
-- standard copy on the patched package description.
cudaCopyHook :: PackageDescription -> LocalBuildInfo -> UserHooks -> CopyFlags -> IO ()
cudaCopyHook package lbi hooks flags = do
  let verbose = fromFlag (copyVerbosity flags)
  (package', outs) <- buildCuda False package lbi verbose
  let installDirs = absoluteInstallDirs package lbi (fromFlag (copyDest flags))
      dataDir = datadir installDirs
  createDirectoryIfMissingVerbose verbose True dataDir
  forM_ outs $ \file ->
    installOrdinaryFile verbose file (dataDir </> takeFileName file)
  copyHook simpleUserHooks package' lbi hooks flags
| SKA-ScienceDataProcessor/RC | MS5/Setup.hs | apache-2.0 | 10,176 | 0 | 24 | 2,729 | 2,451 | 1,234 | 1,217 | 159 | 5 |
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
--------------------------------------------------------------------------------
-- |
-- Module : Data.Projection
-- Copyright : (c) 2014 Patrick Bahr
-- License : BSD3
-- Maintainer : Patrick Bahr <[email protected]>
-- Stability : experimental
-- Portability : non-portable (GHC Extensions)
--
-- This module provides a generic projection function 'pr' for
-- arbitrary nested binary products.
--
--------------------------------------------------------------------------------
module Data.Projection (pr, (:<)) where
import Prelude hiding (Either (..))
-- | A path to a component inside a nested binary product: either this
-- very position, or somewhere within the left\/right component.
data Pos = Here | Left Pos | Right Pos
-- | Result of searching a product type for a component type: absent,
-- present more than once, or present exactly once at the given position.
data RPos = NotFound | Ambiguous | Found Pos
-- | Combine the search results for the left and right halves of a
-- product: a unique find on exactly one side is kept (extended with the
-- corresponding direction); finds on both sides, or an ambiguity on
-- either side, yield 'Ambiguous'; otherwise 'NotFound'.
type family Ch (l :: RPos) (r :: RPos) :: RPos where
  Ch (Found x) (Found y) = Ambiguous
  Ch Ambiguous y = Ambiguous
  Ch x Ambiguous = Ambiguous
  Ch (Found x) y = Found (Left x)
  Ch x (Found y) = Found (Right y)
  Ch x y = NotFound
-- | Search the (possibly nested) product type @p@ for the component
-- type @e@, returning where -- and whether uniquely -- it occurs.
type family Elem (e :: *) (p :: *) :: RPos where
  Elem e e = Found Here
  Elem e (l,r) = Ch (Elem e l) (Elem e r)
  Elem e p = NotFound
-- | A value-level witness of a successful search: a path from the
-- product type @p@ down to its component of type @e@.
data Pointer (pos :: RPos) e p where
  Phere :: Pointer (Found Here) e e
  Pleft :: Pointer (Found pos) e p -> Pointer (Found (Left pos)) e (p,p')
  Pright :: Pointer (Found pos) e p -> Pointer (Found (Right pos)) e (p',p)
-- | Construct the 'Pointer' witness for a position computed by 'Elem';
-- instances mirror the three 'Pointer' constructors.
class GetPointer (pos :: RPos) e p where
  pointer :: Pointer pos e p
instance GetPointer (Found Here) e e where
  pointer = Phere
instance GetPointer (Found pos) e p => GetPointer (Found (Left pos)) e (p, p') where
  pointer = Pleft pointer
instance GetPointer (Found pos) e p => GetPointer (Found (Right pos)) e (p', p) where
  pointer = Pright pointer
-- | Follow a pointer into a nested product, returning the component it
-- addresses. Total: the GADT guarantees the path matches the structure.
pr' :: Pointer pos e p -> p -> e
pr' Phere e = e
pr' (Pleft p) (x,_) = pr' p x
pr' (Pright p) (_,y) = pr' p y
-- | The constraint @e :< p@ expresses that @e@ is a component of the
-- type @p@. That is, @p@ is formed by binary products using the type
-- @e@. The occurrence of @e@ must be unique. For example we have @Int
-- :< (Bool,(Int,Bool))@ but not @Bool :< (Bool,(Int,Bool))@.
type (e :< p) = GetPointer (Elem e p) e p
-- | This function projects the component of type @e@ out of the
-- compound value of type @p@. The scoped type variables fix the
-- 'pointer' instance selected by the type-level search.
pr :: forall e p . (e :< p) => p -> e
pr p = pr' (pointer :: Pointer (Elem e p) e p) p
| pa-ba/projection | src/Data/Projection.hs | bsd-3-clause | 2,695 | 0 | 11 | 629 | 758 | 422 | 336 | 45 | 1 |
{-|
Module : Numeric.CatchingExceptions.MixedTypes.Comparisons
Description : CatchingExceptions comparisons
Copyright : (c) Michal Konecny
License : BSD3
Maintainer : [email protected]
Stability : experimental
Portability : portable
Comparison instances. See parent module for more information.
-}
module Numeric.CatchingExceptions.MixedTypes.Comparisons
where
import Numeric.MixedTypes
-- import qualified Prelude as P
import Numeric.CatchingExceptions.Type
import Numeric.CatchingExceptions.Lifts
-- import Numeric.CatchingExceptions.MixedTypes.Conversions ()
{----- equality comparisons -----}
-- All comparison results are wrapped in 'Maybe'.
-- NOTE(review): presumably 'Nothing' signals that an operand carried a
-- caught exception (via the lift* combinators from
-- Numeric.CatchingExceptions.Lifts) -- confirm against that module.
-- The liftCCtoO / liftOCtoO / liftCOtoO variants lift over
-- catching/catching, ordinary/catching and catching/ordinary operands
-- respectively.
instance (HasEqCertainlyAsymmetric t1 t2) =>
  HasEqAsymmetric (CatchingNumExceptions t1) (CatchingNumExceptions t2)
  where
  type EqCompareType (CatchingNumExceptions t1) (CatchingNumExceptions t2) =
    Maybe (EqCompareType t1 t2)
  equalTo = liftCCtoO equalTo
  notEqualTo = liftCCtoO notEqualTo
instance (HasEqCertainlyAsymmetric Integer t2) =>
  HasEqAsymmetric Integer (CatchingNumExceptions t2)
  where
  type EqCompareType Integer (CatchingNumExceptions t2) =
    Maybe (EqCompareType Integer t2)
  equalTo = liftOCtoO equalTo
  notEqualTo = liftOCtoO notEqualTo
instance (HasEqCertainlyAsymmetric t1 Integer) =>
  HasEqAsymmetric (CatchingNumExceptions t1) Integer
  where
  type EqCompareType (CatchingNumExceptions t1) Integer =
    Maybe (EqCompareType t1 Integer)
  equalTo = liftCOtoO equalTo
  notEqualTo = liftCOtoO notEqualTo
instance (HasEqCertainlyAsymmetric Int t2) =>
  HasEqAsymmetric Int (CatchingNumExceptions t2)
  where
  type EqCompareType Int (CatchingNumExceptions t2) =
    Maybe (EqCompareType Int t2)
  equalTo = liftOCtoO equalTo
  notEqualTo = liftOCtoO notEqualTo
instance (HasEqCertainlyAsymmetric t1 Int) =>
  HasEqAsymmetric (CatchingNumExceptions t1) Int
  where
  type EqCompareType (CatchingNumExceptions t1) Int =
    Maybe (EqCompareType t1 Int)
  equalTo = liftCOtoO equalTo
  notEqualTo = liftCOtoO notEqualTo
instance (HasEqCertainlyAsymmetric Rational t2) =>
  HasEqAsymmetric Rational (CatchingNumExceptions t2)
  where
  type EqCompareType Rational (CatchingNumExceptions t2) =
    Maybe (EqCompareType Rational t2)
  equalTo = liftOCtoO equalTo
  notEqualTo = liftOCtoO notEqualTo
instance (HasEqCertainlyAsymmetric t1 Rational) =>
  HasEqAsymmetric (CatchingNumExceptions t1) Rational
  where
  type EqCompareType (CatchingNumExceptions t1) Rational =
    Maybe (EqCompareType t1 Rational)
  equalTo = liftCOtoO equalTo
  notEqualTo = liftCOtoO notEqualTo
{----- order comparisons -----}
-- Order comparisons follow the same scheme as the equality instances: a
-- 'Maybe'-wrapped result, with liftCCtoO / liftOCtoO / liftCOtoO
-- handling catching/catching, ordinary/catching and catching/ordinary
-- operand combinations respectively.
-- NOTE(review): presumably 'Nothing' indicates an exceptional operand;
-- confirm against Numeric.CatchingExceptions.Lifts.
instance (HasOrderCertainlyAsymmetric t1 t2) =>
  HasOrderAsymmetric (CatchingNumExceptions t1) (CatchingNumExceptions t2)
  where
  type OrderCompareType (CatchingNumExceptions t1) (CatchingNumExceptions t2) =
    Maybe (OrderCompareType t1 t2)
  lessThan = liftCCtoO lessThan
  leq = liftCCtoO leq
  greaterThan = liftCCtoO greaterThan
  geq = liftCCtoO geq
instance (HasOrderCertainlyAsymmetric Integer t2) =>
  HasOrderAsymmetric Integer (CatchingNumExceptions t2)
  where
  type OrderCompareType Integer (CatchingNumExceptions t2) =
    Maybe (OrderCompareType Integer t2)
  lessThan = liftOCtoO lessThan
  leq = liftOCtoO leq
  greaterThan = liftOCtoO greaterThan
  geq = liftOCtoO geq
instance (HasOrderCertainlyAsymmetric t1 Integer) =>
  HasOrderAsymmetric (CatchingNumExceptions t1) Integer
  where
  type OrderCompareType (CatchingNumExceptions t1) Integer =
    Maybe (OrderCompareType t1 Integer)
  lessThan = liftCOtoO lessThan
  leq = liftCOtoO leq
  greaterThan = liftCOtoO greaterThan
  geq = liftCOtoO geq
instance (HasOrderCertainlyAsymmetric Int t2) =>
  HasOrderAsymmetric Int (CatchingNumExceptions t2)
  where
  type OrderCompareType Int (CatchingNumExceptions t2) =
    Maybe (OrderCompareType Int t2)
  lessThan = liftOCtoO lessThan
  leq = liftOCtoO leq
  greaterThan = liftOCtoO greaterThan
  geq = liftOCtoO geq
instance (HasOrderCertainlyAsymmetric t1 Int) =>
  HasOrderAsymmetric (CatchingNumExceptions t1) Int
  where
  type OrderCompareType (CatchingNumExceptions t1) Int =
    Maybe (OrderCompareType t1 Int)
  lessThan = liftCOtoO lessThan
  leq = liftCOtoO leq
  greaterThan = liftCOtoO greaterThan
  geq = liftCOtoO geq
instance (HasOrderCertainlyAsymmetric Rational t2) =>
  HasOrderAsymmetric Rational (CatchingNumExceptions t2)
  where
  type OrderCompareType Rational (CatchingNumExceptions t2) =
    Maybe (OrderCompareType Rational t2)
  lessThan = liftOCtoO lessThan
  leq = liftOCtoO leq
  greaterThan = liftOCtoO greaterThan
  geq = liftOCtoO geq
instance (HasOrderCertainlyAsymmetric t1 Rational) =>
  HasOrderAsymmetric (CatchingNumExceptions t1) Rational
  where
  type OrderCompareType (CatchingNumExceptions t1) Rational =
    Maybe (OrderCompareType t1 Rational)
  lessThan = liftCOtoO lessThan
  leq = liftCOtoO leq
  greaterThan = liftCOtoO greaterThan
  geq = liftCOtoO geq
{----- unary operations -----}
-- Unary operations stay inside 'CatchingNumExceptions'. The
-- 'CanTestValid'/'Show' constraints on the result type are presumably
-- required by 'liftCtoC' to validate and report the produced value --
-- confirm against Numeric.CatchingExceptions.Lifts.
instance
  (CanNeg t, CanTestValid (NegType t), Show (NegType t)) =>
  CanNeg (CatchingNumExceptions t)
  where
  type NegType (CatchingNumExceptions t) = CatchingNumExceptions (NegType t)
  negate = liftCtoC negate
instance
  (CanAbs t, CanTestValid (AbsType t), Show (AbsType t)) =>
  CanAbs (CatchingNumExceptions t)
  where
  type AbsType (CatchingNumExceptions t) = CatchingNumExceptions (AbsType t)
  abs = liftCtoC abs
{----- min/max -----}
-- min/max keep the result wrapped in 'CatchingNumExceptions' (unlike
-- the comparisons above, which return 'Maybe'). The liftCCtoC /
-- liftOCtoC / liftCOtoC variants handle catching/catching,
-- ordinary/catching and catching/ordinary operand combinations.
instance
  (CanMinMaxAsymmetric t1 t2,
   CanTestValid (MinMaxType t1 t2), Show (MinMaxType t1 t2))
  =>
  CanMinMaxAsymmetric (CatchingNumExceptions t1) (CatchingNumExceptions t2)
  where
  type MinMaxType (CatchingNumExceptions t1) (CatchingNumExceptions t2) =
    CatchingNumExceptions (MinMaxType t1 t2)
  min = liftCCtoC min
  max = liftCCtoC max
instance
  (CanMinMaxAsymmetric Integer t2,
   CanTestValid (MinMaxType Integer t2), Show (MinMaxType Integer t2))
  =>
  CanMinMaxAsymmetric Integer (CatchingNumExceptions t2)
  where
  type MinMaxType Integer (CatchingNumExceptions t2) =
    CatchingNumExceptions (MinMaxType Integer t2)
  min = liftOCtoC min
  max = liftOCtoC max
instance
  (CanMinMaxAsymmetric t1 Integer,
   CanTestValid (MinMaxType t1 Integer), Show (MinMaxType t1 Integer))
  =>
  CanMinMaxAsymmetric (CatchingNumExceptions t1) Integer
  where
  type MinMaxType (CatchingNumExceptions t1) Integer =
    CatchingNumExceptions (MinMaxType t1 Integer)
  min = liftCOtoC min
  max = liftCOtoC max
instance
  (CanMinMaxAsymmetric Int t2,
   CanTestValid (MinMaxType Int t2), Show (MinMaxType Int t2))
  =>
  CanMinMaxAsymmetric Int (CatchingNumExceptions t2)
  where
  type MinMaxType Int (CatchingNumExceptions t2) =
    CatchingNumExceptions (MinMaxType Int t2)
  min = liftOCtoC min
  max = liftOCtoC max
instance
  (CanMinMaxAsymmetric t1 Int,
   CanTestValid (MinMaxType t1 Int), Show (MinMaxType t1 Int))
  =>
  CanMinMaxAsymmetric (CatchingNumExceptions t1) Int
  where
  type MinMaxType (CatchingNumExceptions t1) Int =
    CatchingNumExceptions (MinMaxType t1 Int)
  min = liftCOtoC min
  max = liftCOtoC max
instance
  (CanMinMaxAsymmetric Rational t2,
   CanTestValid (MinMaxType Rational t2), Show (MinMaxType Rational t2))
  =>
  CanMinMaxAsymmetric Rational (CatchingNumExceptions t2)
  where
  type MinMaxType Rational (CatchingNumExceptions t2) =
    CatchingNumExceptions (MinMaxType Rational t2)
  min = liftOCtoC min
  max = liftOCtoC max
instance
  (CanMinMaxAsymmetric t1 Rational,
   CanTestValid (MinMaxType t1 Rational), Show (MinMaxType t1 Rational))
  =>
  CanMinMaxAsymmetric (CatchingNumExceptions t1) Rational
  where
  type MinMaxType (CatchingNumExceptions t1) Rational =
    CatchingNumExceptions (MinMaxType t1 Rational)
  min = liftCOtoC min
  max = liftCOtoC max
| michalkonecny/num-exceptions | src/Numeric/CatchingExceptions/MixedTypes/Comparisons.hs | bsd-3-clause | 7,772 | 0 | 8 | 1,322 | 2,025 | 1,032 | 993 | -1 | -1 |
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE ViewPatterns #-}
-- Create a source distribution tarball
module Stack.SDist
( getSDistTarball
) where
import qualified Codec.Archive.Tar as Tar
import qualified Codec.Archive.Tar.Entry as Tar
import qualified Codec.Compression.GZip as GZip
import Control.Applicative
import Control.Concurrent.Execute (ActionContext(..))
import Control.Monad (when, void)
import Control.Monad.Catch (MonadMask)
import Control.Monad.IO.Class
import Control.Monad.Logger
import Control.Monad.Reader (MonadReader, asks)
import Control.Monad.Trans.Control (liftBaseWith)
import Control.Monad.Trans.Resource
import qualified Data.ByteString as S
import qualified Data.ByteString.Lazy as L
import Data.Data (Data, Typeable, cast, gmapT)
import Data.Either (partitionEithers)
import Data.List
import qualified Data.Map.Strict as Map
import Data.Maybe (fromMaybe)
import Data.Monoid ((<>))
import qualified Data.Set as Set
import qualified Data.Text as T
import qualified Data.Text.Lazy as TL
import qualified Data.Text.Lazy.Encoding as TLE
import Distribution.Package (Dependency (..))
import Distribution.PackageDescription.PrettyPrint (showGenericPackageDescription)
import Distribution.Version (simplifyVersionRange, orLaterVersion, earlierVersion)
import Distribution.Version.Extra
import Network.HTTP.Client.Conduit (HasHttpManager)
import Path
import Path.IO
import Prelude -- Fix redundant import warnings
import Stack.Build (mkBaseConfigOpts)
import Stack.Build.Execute
import Stack.Build.Installed
import Stack.Build.Source (loadSourceMap, localFlags)
import Stack.Build.Target
import Stack.Constants
import Stack.Package
import Stack.Types
import Stack.Types.Internal
import qualified System.FilePath as FP
type M env m = (MonadIO m,MonadReader env m,HasHttpManager env,MonadLogger m,MonadBaseControl IO m,MonadMask m,HasLogLevel env,HasEnvConfig env,HasTerminal env)
-- | Given the path to a local package, creates its source
-- distribution tarball.
--
-- While this yields a 'FilePath', the name of the tarball, this
-- tarball is not written to the disk and instead yielded as a lazy
-- bytestring.
getSDistTarball :: M env m
                => Maybe PvpBounds -- ^ override Config value
                -> Path Abs Dir
                -> m (FilePath, L.ByteString)
getSDistTarball mpvpBounds pkgDir = do
    config <- asks getConfig
    let pvpBounds = fromMaybe (configPvpBounds config) mpvpBounds
        -- Only rewrite the .cabal file when PVP bounds are requested.
        tweakCabal = pvpBounds /= PvpBoundsNone
        pkgFp = toFilePath pkgDir
    lp <- readLocalPackage pkgDir
    $logInfo $ "Getting file list for " <> T.pack pkgFp
    (fileList, cabalfp) <- getSDistFileList lp
    $logInfo $ "Building sdist tarball for " <> T.pack pkgFp
    files <- normalizeTarballPaths (lines fileList)
    -- NOTE: Could make this use lazy I/O to only read files as needed
    -- for upload (both GZip.compress and Tar.write are lazy).
    -- However, it seems less error prone and more predictable to read
    -- everything in at once, so that's what we're doing for now:
    let tarPath isDir fp = either error id
            (Tar.toTarPath isDir (pkgId FP.</> fp))
        packWith f isDir fp =
            liftIO $ f (pkgFp FP.</> fp)
                       (tarPath isDir fp)
        packDir = packWith Tar.packDirectoryEntry True
        -- The .cabal file may be replaced by a PVP-bounds-tweaked copy;
        -- every other file is packed verbatim from disk.
        packFile fp
            | tweakCabal && isCabalFp fp = do
                lbs <- getCabalLbs pvpBounds $ toFilePath cabalfp
                return $ Tar.fileEntry (tarPath False fp) lbs
            | otherwise = packWith Tar.packFileEntry False fp
        isCabalFp fp = toFilePath pkgDir FP.</> fp == toFilePath cabalfp
        tarName = pkgId FP.<.> "tar.gz"
        pkgId = packageIdentifierString (packageIdentifier (lpPackage lp))
    dirEntries <- mapM packDir (dirsFromFiles files)
    fileEntries <- mapM packFile files
    return (tarName, GZip.compress (Tar.write (dirEntries ++ fileEntries)))
-- | Get the PVP bounds-enabled version of the given cabal file:
-- parse it, add lower/upper version bounds to each dependency
-- (per the 'PvpBounds' setting), and render it back to bytes.
getCabalLbs :: M env m => PvpBounds -> FilePath -> m L.ByteString
getCabalLbs pvpBounds fp = do
    bs <- liftIO $ S.readFile fp
    (_warnings, gpd) <- readPackageUnresolvedBS Nothing bs
    (_, _, _, _, sourceMap) <- loadSourceMap AllowNoTargets defaultBuildOpts
    menv <- getMinimalEnvOverride
    (installedMap, _, _) <- getInstalled menv GetInstalledOpts
        { getInstalledProfiling = False
        , getInstalledHaddock = False
        }
        sourceMap
    -- Generic traversal: rewrite every Dependency node in the package
    -- description, wherever it occurs.
    let gpd' = gtraverseT (addBounds sourceMap installedMap) gpd
    return $ TLE.encodeUtf8 $ TL.pack $ showGenericPackageDescription gpd'
  where
    -- Add missing lower/upper bounds to one dependency, using the
    -- version currently selected in the source map (or, failing that,
    -- the installed version). Existing bounds are left untouched.
    addBounds :: SourceMap -> InstalledMap -> Dependency -> Dependency
    addBounds sourceMap installedMap dep@(Dependency cname range) =
      case lookupVersion (fromCabalPackageName cname) of
        Nothing -> dep
        Just version -> Dependency cname $ simplifyVersionRange
          $ (if toAddUpper && not (hasUpper range) then addUpper version else id)
          $ (if toAddLower && not (hasLower range) then addLower version else id)
            range
      where
        lookupVersion name =
          case Map.lookup name sourceMap of
            Just (PSLocal lp) -> Just $ packageVersion $ lpPackage lp
            Just (PSUpstream version _ _) -> Just version
            Nothing ->
              case Map.lookup name installedMap of
                Just (version, _, _) -> Just version
                Nothing -> Nothing
        -- upper bound: strictly below the next major version (PVP)
        addUpper version = intersectVersionRanges
          (earlierVersion $ toCabalVersion $ nextMajorVersion version)
        -- lower bound: at least the version we resolved
        addLower version = intersectVersionRanges
          (orLaterVersion (toCabalVersion version))
        (toAddLower, toAddUpper) =
          case pvpBounds of
            PvpBoundsNone -> (False, False)
            PvpBoundsUpper -> (False, True)
            PvpBoundsLower -> (True, False)
            PvpBoundsBoth -> (True, True)
-- | Generic bottom-up rewrite: apply @f@ to every subterm whose type
-- matches @b@, descending recursively through all other subterms and
-- leaving the overall structure intact. (The root value itself is not
-- rewritten, only its subterms.)
gtraverseT :: (Data a,Typeable b) => (Typeable b => b -> b) -> a -> a
gtraverseT f = gmapT step
  where
    step x =
      maybe (gtraverseT f x)
            (\b -> fromMaybe x (cast (f b)))
            (cast x)
-- Read in a 'LocalPackage' config. This makes some default decisions
-- about 'LocalPackage' fields that might not be appropriate for other
-- usecases (tests/benchmarks disabled, placeholder build-cache fields).
--
-- TODO: Dedupe with similar code in "Stack.Build.Source".
readLocalPackage :: M env m => Path Abs Dir -> m LocalPackage
readLocalPackage pkgDir = do
    econfig <- asks getEnvConfig
    bconfig <- asks getBuildConfig
    cabalfp <- getCabalFileName pkgDir
    name <- parsePackageNameFromFilePath cabalfp
    let config = PackageConfig
            { packageConfigEnableTests = False
            , packageConfigEnableBenchmarks = False
            , packageConfigFlags = localFlags Map.empty bconfig name
            , packageConfigCompilerVersion = envConfigCompilerVersion econfig
            , packageConfigPlatform = configPlatform $ getConfig bconfig
            }
    (warnings,package) <- readPackage config cabalfp
    mapM_ (printCabalFileWarning cabalfp) warnings
    return LocalPackage
        { lpPackage = package
        , lpExeComponents = Nothing -- HACK: makes it so that sdist output goes to a log instead of a file.
        , lpDir = pkgDir
        , lpCabalFile = cabalfp
        -- NOTE: these aren't the 'correct' values, but aren't used in
        -- the usage of this function in this module.
        , lpTestDeps = Map.empty
        , lpBenchDeps = Map.empty
        , lpTestBench = Nothing
        , lpDirtyFiles = True
        , lpNewBuildCache = Map.empty
        , lpFiles = Set.empty
        , lpComponents = Set.empty
        }
-- | Returns a newline-separated list of paths, and the absolute path to
-- the .cabal file. The list is produced by running
-- @cabal sdist --list-sources@ in a temporary directory.
getSDistFileList :: M env m => LocalPackage -> m (String, Path Abs File)
getSDistFileList lp =
    withCanonicalizedSystemTempDirectory (stackProgName <> "-sdist") $ \tmpdir -> do
        menv <- getMinimalEnvOverride
        let bopts = defaultBuildOpts
        baseConfigOpts <- mkBaseConfigOpts bopts
        (_, _mbp, locals, _extraToBuild, sourceMap) <- loadSourceMap NeedTargets bopts
        runInBase <- liftBaseWith $ \run -> return (void . run)
        withExecuteEnv menv bopts baseConfigOpts locals
            [] -- provide empty list of globals. This is a hack around custom Setup.hs files
            sourceMap $ \ee -> do
            withSingleContext runInBase ac ee task Nothing (Just "sdist") $ \_package cabalfp _pkgDir cabal _announce _console _mlogFile -> do
                let outFile = toFilePath tmpdir FP.</> "source-files-list"
                cabal False ["sdist", "--list-sources", outFile]
                contents <- liftIO (readFile outFile)
                return (contents, cabalfp)
  where
    package = lpPackage lp
    ac = ActionContext Set.empty
    -- Minimal build task: just enough for withSingleContext to hand us
    -- a configured `cabal` runner for this local package.
    task = Task
        { taskProvides = PackageIdentifier (packageName package) (packageVersion package)
        , taskType = TTLocal lp
        , taskConfigOpts = TaskConfigOpts
            { tcoMissing = Set.empty
            , tcoOpts = \_ -> ConfigureOpts [] []
            }
        , taskPresent = Map.empty
        }
-- | Normalize the file paths destined for the tarball, warning about
-- (and dropping) any path that escapes the package directory via "..".
normalizeTarballPaths :: M env m => [FilePath] -> m [FilePath]
normalizeTarballPaths fps = do
    --TODO: consider whether erroring out is better - otherwise the
    --user might upload an incomplete tar?
    when (not (null outsideDir)) $
        $logWarn $ T.concat
            [ "Warning: These files are outside of the package directory, and will be omitted from the tarball: "
            , T.pack (show outsideDir)]
    return files
  where
    -- Left = escapes the package dir, Right = normalized in-tree path.
    (outsideDir, files) = partitionEithers (map pathToEither fps)
    pathToEither fp = maybe (Left fp) Right (normalizePath fp)
pathToEither fp = maybe (Left fp) Right (normalizePath fp)
normalizePath :: FilePath -> (Maybe FilePath)
normalizePath = fmap FP.joinPath . go . FP.splitDirectories . FP.normalise
where
go [] = Just []
go ("..":_) = Nothing
go (_:"..":xs) = go xs
go (x:xs) = (x :) <$> go xs
-- | All ancestor directories (excluding \".\") of the given files, in
-- ascending order — these need explicit directory entries in the tar.
dirsFromFiles :: [FilePath] -> [FilePath]
dirsFromFiles files = Set.toAscList (Set.delete "." collected)
  where
    collected = foldl' addFile Set.empty files
    -- record every ancestor of one file's parent directory
    addFile seen path = ancestors seen (FP.takeDirectory path)
    ancestors seen d
      | d `Set.member` seen = seen           -- already walked this branch
      | otherwise           = ancestors (Set.insert d seen) (FP.takeDirectory d)
| rrnewton/stack | src/Stack/SDist.hs | bsd-3-clause | 10,819 | 0 | 21 | 2,924 | 2,616 | 1,388 | 1,228 | 197 | 10 |
module Y21.D06 where
import qualified Data.IntMap.Strict as M
import Imports
-- | Lanternfish (AoC 2021 day 6): count fish after @n@ days, given the
-- comma-separated initial timers. Fish are bucketed by timer value so
-- each day costs O(#distinct timers), not O(#fish).
solve :: Int -> String -> Int
solve n =
      sum
    . map snd
    . (!! n)
    . iterate tick
    . map ((, 1::Int) . read @Int)
    . splitOn ","
  where
    -- One day: a timer of 0 resets to 6 and spawns a fish at 8;
    -- every other timer decrements. Equal timers are merged first.
    tick :: (k ~ M.Key, Num v) => [(k, v)] -> [(k, v)]
    tick xs = M.fromListWith (+) xs & M.toList >>= \case
        (0, v) -> [(6, v), (8, v)]
        (k, v) -> [(k-1, v)]
-- | Part 1: population after 80 days.
solve1 :: String -> Int
solve1 = solve 80
-- | Part 2: population after 256 days.
solve2 :: String -> Int
solve2 = solve 256
| oshyshko/adventofcode | src/Y21/D06.hs | bsd-3-clause | 500 | 0 | 13 | 151 | 260 | 147 | 113 | -1 | -1 |
-- Parser/Lexer for a small subset of the C language
-- Written by Alex Teiche February 2015
-- Based on Stephen Diehl's "Write You a Haskell" parser in Chapter 3
module Parser where
import Numeric
import Control.Monad
import Control.Applicative ((<$>))
import Data.Char
import Syntax
import Text.Parsec
import Text.Parsec.String (Parser)
import qualified Text.Parsec.Expr as Ex
import qualified Text.Parsec.Token as Tok
import Data.Functor.Identity
-- | Keywords of the C subset; these may not be used as identifiers.
reservedNames :: [String]
reservedNames = ["int", "return"]
-- | Operators the lexer treats as reserved.
reservedOpNames :: [String]
reservedOpNames = ["-", "==", "=", "*"]
-- | Token-language definition: C-style comments, C identifier rules,
-- and the reserved words/operators above.
langDef :: Tok.LanguageDef ()
langDef = Tok.LanguageDef
  { Tok.commentStart    = "/*"
  , Tok.commentEnd      = "*/"
  , Tok.commentLine     = "//"
  , Tok.nestedComments  = True
  , Tok.identStart      = letter
  , Tok.identLetter     = alphaNum <|> char '_'
  , Tok.opStart         = oneOf "+-=*"
  , Tok.opLetter        = oneOf "=*"
  , Tok.reservedNames   = reservedNames
  , Tok.reservedOpNames = reservedOpNames
  , Tok.caseSensitive   = True
  }
-- | The token parser built from 'langDef'; all lexeme parsers below are
-- specializations of it (they consume trailing whitespace/comments).
lexer :: Tok.TokenParser ()
lexer = Tok.makeTokenParser langDef
parens :: Parser a -> Parser a
parens = Tok.parens lexer
braces :: Parser a -> Parser a
braces = Tok.braces lexer
semi = Tok.semi lexer
comma = Tok.comma lexer
reserved :: String -> Parser ()
reserved = Tok.reserved lexer
semiSep :: Parser a -> Parser [a]
semiSep = Tok.semiSep lexer
commaSep = Tok.commaSep lexer
reservedOp :: String -> Parser ()
reservedOp = Tok.reservedOp lexer
--identifier :: String -> Parser String
identifier = Tok.identifier lexer
symbol = Tok.symbol lexer
-- | Build an infix binary operator entry for the expression table;
-- the parsed operator text is kept in the 'BinOp' node.
binaryOp :: String -> Ex.Assoc -> Ex.Operator String () Identity Expr
binaryOp s assoc = Ex.Infix (reservedOp s >> return (BinOp s)) assoc
-- | Left-associative binary operator.
lassocOp :: String -> Ex.Operator String () Identity Expr
lassocOp s = binaryOp s Ex.AssocLeft
-- Precedence levels taken from this table (earlier rows bind tighter):
-- http://en.wikipedia.org/wiki/Operators_in_C_and_C%2B%2B#Operator_precedence
opTable :: Ex.OperatorTable String () Identity Expr
opTable = [ [
            -- C Precedence Level 6
              lassocOp "+"
            , lassocOp "-"
          ] , [
            -- C Precedence Level 7
              lassocOp "=="
          ] , [
            -- C Precedence Level 16
              lassocOp "="
          ] ]
-- | Atomic expression: literal, parenthesized expression, function
-- call, or identifier. 'try' keeps a failed call from consuming the
-- identifier it shares with the plain-identifier alternative.
term :: Parser Expr
term =
      int
  <|> parens expr
  <|> try functionCall
  <|> (identifier >>= (return . Identifier))
-- | Full expression, built from 'term's and the operator table.
expr :: Parser Expr
expr = Ex.buildExpressionParser opTable term
-- | @name(arg, ...)@ call expression.
functionCall :: Parser Expr
functionCall = do
  name <- identifier
  args <- parens (commaSep expr)
  return $ FuncCall name args
-- END EXPRESSION PARSER, BEGIN AST PARSER

-- | Top-level unit; currently only function definitions exist.
translationUnit :: Parser Expr
translationUnit = functionDefinition
-- | @type name(args) { body }@.
functionDefinition :: Parser Expr
functionDefinition = do
  retType <- fullType
  name <- identifier
  arguments <- parens (commaSep variable)
  body <- braces statements
  return $ FuncDef name retType arguments body
-- | One statement; 'try' lets alternatives backtrack since they can
-- share prefixes (e.g. an identifier starting either kind).
statement :: Parser Expr
statement = (try selectionStatement)
        <|> (try jumpStatement)
--        <|> (try assignment)
        <|> (try expressionStatement)
        <|> variableDeclaration
statements :: Parser [Expr]
--statements = statement `sepEndBy` semi
statements = many statement
{-
assignment :: Parser Expr
assignment = do
  lval <- expr
  reservedOp "="
  rval <- expr
  return $ Assignment lval rval
-}
-- | Branching statements; only @if@ for now.
selectionStatement :: Parser Expr
selectionStatement = ifClause
-- | @return [expr];@ — the expression is optional.
jumpStatement :: Parser Expr
jumpStatement = do
  reserved "return"
  result <- optionMaybe expr
  semi
  return $ Return result
-- | An expression followed by @;@; a bare @;@ parses as 'Pass'.
expressionStatement :: Parser Expr
expressionStatement = (semi >> return Pass) <|> do
  val <- expr
  semi
  return val
-- | @if (cond) { body }@ — no else branch in this subset.
ifClause :: Parser Expr
ifClause = do
  reserved "if"
  cond <- parens expr
  body <- braces statements
  return $ If cond body
variableDeclaration :: Parser Expr
variableDeclaration = VariableDecl <$> variable
-- | @type name@ pair, as used in declarations and parameter lists.
variable :: Parser Symbol
variable = do
  typ <- fullType
  name <- identifier
  return $ Symbol name typ
-- | Base types; only @int@ exists in this subset.
primType :: Parser PrimType
primType = reserved "int" >> return Int
-- | A base type plus pointer depth (number of trailing @*@s).
fullType :: Parser Type
fullType = do
  prim <- primType
  asterisks <- many (symbol "*")
  return $ Type prim (length asterisks)
-- Number Parsing
int :: Parser Expr
int = try binary <|> try hexadecimal <|> try octal <|> decimal
decimal :: Parser Expr
decimal = liftM (Const . read) $ many1 digit
hexadecimal :: Parser Expr
hexadecimal = liftM (Const . extractNum . readHex) $ hexPrefix >> (many1 hexDigit)
where hexPrefix = (try (string "0x") <|> string "0X")
octal :: Parser Expr
octal = liftM (Const . extractNum . readOct) $ octalPrefix >> (many1 octDigit)
where octalPrefix = char '0'
binary :: Parser Expr
binary = liftM (Const . readBin) $ binaryPrefix >> (many1 binDigit)
where binaryPrefix = string "0b"
binDigit = oneOf "01"
-- Extract the number from the result of readHex and readOctal
extractNum :: [(Int, String)] -> Int
extractNum = fst . head
readBin :: Integral a => String -> a
readBin s = (fst . head) $ readInt 2 (`elem` "01") digitToInt s
| teiche/femtocc | Parser.hs | bsd-3-clause | 5,701 | 0 | 11 | 1,615 | 1,517 | 782 | 735 | 144 | 1 |
module SpecHelper (module X) where
import Test.Hspec as X
import Test.Hspec.QuickCheck as X
| zalora/Angel | test/SpecHelper.hs | bsd-3-clause | 93 | 0 | 4 | 14 | 25 | 18 | 7 | 3 | 0 |
import Distribution.PackageDescription
import Distribution.PackageDescription.Parse
import Distribution.Verbosity
import Distribution.System
import Distribution.Simple
import Distribution.Simple.Utils
import Distribution.Simple.Setup
import Distribution.Simple.Command
import Distribution.Simple.Program
import Distribution.Simple.LocalBuildInfo
import Distribution.Simple.PreProcess hiding (ppC2hs)
import Control.Exception
import Control.Monad
import System.Exit
import System.FilePath
import System.Directory
import System.Environment
import System.IO.Error hiding (catch)
import Prelude hiding (catch)
-- Replicate the invocation of the postConf script, so that we can insert the
-- arguments of --extra-include-dirs and --extra-lib-dirs as paths in CPPFLAGS
-- and LDFLAGS into the environment
--
-- | Autoconf-style Setup with two tweaks: conf hooks that export
-- extra include/lib dirs to ./configure, and a c2hs preprocessor that
-- honours x-extra-c2hs-options from the buildinfo file.
main :: IO ()
main = defaultMainWithHooks customHooks
  where
    preprocessors = hookedPreProcessors autoconfUserHooks
    customHooks = autoconfUserHooks {
      preConf             = preConfHook,
      postConf            = postConfHook,
      -- replace the stock "chs" preprocessor with our own
      hookedPreProcessors = ("chs",ppC2hs) : filter (\x -> fst x /= "chs") preprocessors
    }
-- | Before configuring: generate the @configure@ script with autoconf
-- if it does not exist yet, then defer to the stock autoconf hook.
preConfHook :: Args -> ConfigFlags -> IO HookedBuildInfo
preConfHook args flags = do
  let verbosity = fromFlag (configVerbosity flags)
  confExists <- doesFileExist "configure"
  unless confExists $ do
    code <- rawSystemExitCode verbosity "autoconf" []
    case code of
      ExitSuccess   -> return ()
      ExitFailure c -> die $ "autoconf exited with code " ++ show c
  preConf autoconfUserHooks args flags
-- | After configuring: run ./configure ourselves (via
-- 'runConfigureScript', which injects CPPFLAGS/LDFLAGS), then merge the
-- resulting buildinfo file into the package description.
postConfHook :: Args -> ConfigFlags -> PackageDescription -> LocalBuildInfo -> IO ()
postConfHook args flags pkg_descr lbi
  = let verbosity = fromFlag (configVerbosity flags)
    in do
      noExtraFlags args
      confExists <- doesFileExist "configure"
      if confExists
        then runConfigureScript verbosity False flags lbi
        else die "configure script not found."

      pbi <- getHookedBuildInfo verbosity
      let pkg_descr' = updatePackageDescription pbi pkg_descr
      postConf simpleUserHooks args flags pkg_descr' lbi
-- | Run @sh configure@ with the configured C compiler and with
-- --extra-include-dirs / --extra-lib-dirs exported through CPPFLAGS and
-- LDFLAGS, so the configure script can see them.
runConfigureScript :: Verbosity -> Bool -> ConfigFlags -> LocalBuildInfo -> IO ()
runConfigureScript verbosity backwardsCompatHack flags lbi = do
  env <- getEnvironment
  (ccProg, ccFlags) <- configureCCompiler verbosity (withPrograms lbi)
  -- append to any values already present in the caller's environment
  let env' = foldr appendToEnvironment env
               [("CC", ccProg)
               ,("CFLAGS", unwords ccFlags)
               ,("CPPFLAGS", unwords $ map ("-I"++) (configExtraIncludeDirs flags))
               ,("LDFLAGS", unwords $ map ("-L"++) (configExtraLibDirs flags))
               ]
  handleNoWindowsSH $ rawSystemExitWithEnv verbosity "sh" args env'
  where
    args = "configure" : configureArgs backwardsCompatHack flags

    appendToEnvironment (key, val) [] = [(key, val)]
    appendToEnvironment (key, val) (kv@(k, v) : rest)
      | key == k = (key, v ++ " " ++ val) : rest
      | otherwise = kv : appendToEnvironment (key, val) rest

    -- On Windows, a missing "sh" means no MSYS/Cygwin; report that
    -- specifically instead of a bare does-not-exist error.
    handleNoWindowsSH action
      | buildOS /= Windows
      = action
      | otherwise
      = action
          `catch` \ioe -> if isDoesNotExistError ioe
                            then die notFoundMsg
                            else throwIO ioe

    notFoundMsg = "The package has a './configure' script. This requires a "
               ++ "Unix compatibility toolchain such as MinGW+MSYS or Cygwin."
-- | Read the package's .buildinfo file (as produced by ./configure),
-- or return an empty HookedBuildInfo when none exists.
getHookedBuildInfo :: Verbosity -> IO HookedBuildInfo
getHookedBuildInfo verbosity = do
  maybe_infoFile <- defaultHookedPackageDesc
  case maybe_infoFile of
    Nothing -> return emptyHookedBuildInfo
    Just infoFile -> do
      info verbosity $ "Reading parameters from " ++ infoFile
      readHookedBuildInfo verbosity infoFile
-- Replicate the default C2HS preprocessor hook here, and inject a value for
-- extra-c2hs-options, if it was present in the buildinfo file
--
-- Everything below copied from Distribution.Simple.PreProcess
--
-- | c2hs preprocessor hook: like the stock one, but additionally passes
-- any x-extra-c2hs-options from the buildinfo file through to c2hs.
ppC2hs :: BuildInfo -> LocalBuildInfo -> PreProcessor
ppC2hs bi lbi
    = PreProcessor {
        platformIndependent = False,
        runPreProcessor     = \(inBaseDir, inRelativeFile)
                               (outBaseDir, outRelativeFile) verbosity ->
          rawSystemProgramConf verbosity c2hsProgram (withPrograms lbi) . filter (not . null) $
            maybe [] words (lookup "x-extra-c2hs-options" (customFieldsBI bi))
            ++ ["--include=" ++ outBaseDir]
            ++ ["--cppopts=" ++ opt | opt <- getCppOptions bi lbi]
            ++ ["--output-dir=" ++ outBaseDir,
                "--output=" ++ outRelativeFile,
                inBaseDir </> inRelativeFile]
      }
-- | CPP options for c2hs: compiler defines, include dirs, and any
-- -D/-I/-U flags from the package's cc-options.
getCppOptions :: BuildInfo -> LocalBuildInfo -> [String]
getCppOptions bi lbi
    = hcDefines (compiler lbi)
   ++ ["-I" ++ dir | dir <- includeDirs bi]
   ++ [opt | opt@('-':c:_) <- ccOptions bi, c `elem` "DIU"]
-- | The compiler-identifying CPP defines (e.g. __GLASGOW_HASKELL__).
hcDefines :: Compiler -> [String]
hcDefines comp =
  case compilerFlavor comp of
    GHC  -> ["-D__GLASGOW_HASKELL__=" ++ versionInt version]
    JHC  -> ["-D__JHC__=" ++ versionInt version]
    NHC  -> ["-D__NHC__=" ++ versionInt version]
    Hugs -> ["-D__HUGS__"]
    _    -> []
  where version = compilerVersion comp
-- TODO: move this into the compiler abstraction
-- FIXME: this forces GHC's crazy 4.8.2 -> 408 convention on all the other
-- compilers. Check if that's really what they want.
-- | Render a version the way GHC encodes __GLASGOW_HASKELL__:
-- 6.8.x -> "608", 6.10.x -> "610". An empty branch defaults to "1",
-- and a single component is shown as-is.
versionInt :: Version -> String
versionInt v =
    case versionBranch v of
      []            -> "1"
      [n]           -> show n
      (maj:minor:_) ->
        let minorStr = show minor
            -- pad a single-digit minor component with a leading zero
            padded   = if length minorStr < 2 then '0' : minorStr else minorStr
        in show maj ++ padded
| phaazon/cuda | Setup.hs | bsd-3-clause | 5,949 | 0 | 16 | 1,559 | 1,444 | 754 | 690 | 119 | 5 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StandaloneDeriving #-}
-- #hide
module Data.Thyme.LocalTime.Internal where
import Prelude hiding ((.))
import Control.Applicative
import Control.Category hiding (id)
import Control.Lens
import Control.Monad
import Data.AffineSpace
import Data.Data
import Data.Micro
import Data.Thyme.Calendar
import Data.Thyme.Clock.Scale
import Data.Thyme.Clock.UTC
#if !SHOW_INTERNAL
import Data.Thyme.Format.Internal
#endif
import Data.Thyme.LocalTime.TimeZone
import Data.VectorSpace
------------------------------------------------------------------------
-- * Time of day
type Hour = Int
type Minute = Int

-- | A time of day; seconds are a 'DiffTime' so leap seconds
-- (second values in [60, 61)) can be represented.
data TimeOfDay = TimeOfDay
    { todHour :: {-# UNPACK #-}!Hour
    , todMin :: {-# UNPACK #-}!Minute
    , todSec :: {-# UNPACK #-}!DiffTime
    } deriving (Eq, Ord, Data, Typeable)

#if SHOW_INTERNAL
deriving instance Show TimeOfDay
#else
-- "HH:MM:SS[.ffffff]" — the fractional part is shown only when nonzero.
instance Show TimeOfDay where
    showsPrec _ (TimeOfDay h m (DiffTime s))
            = shows02 h . (:) ':' . shows02 m . (:) ':'
            . shows02 (fromIntegral si) . frac where
        (si, Micro su) = microQuotRem s (Micro 1000000)
        frac = if su == 0 then id else (:) '.' . fills06 su . drops0 su
#endif

{-# INLINE makeTimeOfDayValid #-}
-- | Smart constructor: hours 0-23, minutes 0-59, and seconds in
-- [0, 61) to admit leap seconds; 'Nothing' otherwise.
makeTimeOfDayValid :: Hour -> Minute -> DiffTime -> Maybe TimeOfDay
makeTimeOfDayValid h m s@(DiffTime u) = TimeOfDay h m s
    <$ guard (0 <= h && h <= 23 && 0 <= m && m <= 59)
    <* guard (Micro 0 <= u && u < Micro 61000000)
{-# INLINE timeOfDay #-}
-- | Iso between a time-since-midnight 'DiffTime' and its
-- hours/minutes/seconds decomposition.
timeOfDay :: Simple Iso DiffTime TimeOfDay
timeOfDay = iso fromDiff toDiff where

    {-# INLINEABLE fromDiff #-}
    fromDiff :: DiffTime -> TimeOfDay
    fromDiff (DiffTime t) = TimeOfDay
            (fromIntegral h) (fromIntegral m) (DiffTime s) where
        -- peel off whole hours, then whole minutes (in microseconds)
        (h, ms) = microQuotRem t (toMicro 3600)
        (m, s) = microQuotRem ms (toMicro 60)

    {-# INLINEABLE toDiff #-}
    toDiff :: TimeOfDay -> DiffTime
    toDiff (TimeOfDay h m s) = s
        ^+^ fromIntegral m *^ DiffTime (toMicro 60)
        ^+^ fromIntegral h *^ DiffTime (toMicro 3600)
type Minutes = Int
type Days = Int

-- | Shift a 'TimeOfDay' by a number of minutes. The first component of
-- the result is the whole-day carry (can be negative), the second the
-- wrapped-around time of day; seconds are untouched.
{-# INLINE addMinutes #-}
addMinutes :: Minutes -> TimeOfDay -> (Days, TimeOfDay)
addMinutes delta (TimeOfDay h m s) = (dayCarry, TimeOfDay h' m' s) where
    (hourCarry, m') = divMod (m + delta) 60
    (dayCarry, h') = divMod (h + hourCarry) 24
{-# INLINE timeOfDayFraction #-}
-- | Iso between a fraction of a day (0 = midnight) and 'TimeOfDay',
-- scaling by the POSIX day length.
timeOfDayFraction :: Simple Iso Rational TimeOfDay
timeOfDayFraction = iso fromRatio toRatio . timeOfDay where
    NominalDiffTime posixDay = posixDayLength

    fromRatio :: Rational -> DiffTime
    fromRatio r = DiffTime (r *^ posixDay)

    toRatio :: DiffTime -> Rational
    toRatio (DiffTime t) = t ^/^ posixDay
------------------------------------------------------------------------
-- * Local Time
-- | A wall-clock date and time, with no attached time zone.
data LocalTime = LocalTime
    { localDay :: {-# UNPACK #-}!Day
    , localTimeOfDay :: {-only 3 words…-} {-# UNPACK #-}!TimeOfDay
    } deriving (Eq, Ord, Data, Typeable)

#if SHOW_INTERNAL
deriving instance Show LocalTime
#else
instance Show LocalTime where
    showsPrec p (LocalTime d t) = showsPrec p d . (:) ' ' . showsPrec p t
#endif

{-# INLINE utcLocalTime #-}
-- | Iso between 'UTCTime' and the local time in the given zone; the
-- zone's minute offset is applied with a day carry in each direction.
utcLocalTime :: TimeZone -> Simple Iso UTCTime LocalTime
utcLocalTime TimeZone {..} = utcTime . iso localise globalise where

    {-# INLINEABLE localise #-}
    localise :: UTCView -> LocalTime
    localise (UTCTime day dt) = LocalTime (day .+^ dd) tod where
        (dd, tod) = addMinutes timeZoneMinutes (view timeOfDay dt)

    {-# INLINEABLE globalise #-}
    globalise :: LocalTime -> UTCView
    globalise (LocalTime day tod) = UTCTime (day .+^ dd)
            (review timeOfDay utcToD) where
        (dd, utcToD) = addMinutes (negate timeZoneMinutes) tod
-- TODO: ut1LocalTime
------------------------------------------------------------------------
-- * Zoned Time
-- | A local time together with the time zone it is expressed in.
data ZonedTime = ZonedTime
    { zonedTimeToLocalTime :: {-only 4 words…-} {-# UNPACK #-}!LocalTime
    , zonedTimeZone :: !TimeZone
    } deriving (Eq, Ord, Data, Typeable)

{-# INLINE zonedTime #-}
-- | Iso between a (zone, UTC) pair and the corresponding 'ZonedTime'.
zonedTime :: Simple Iso (TimeZone, UTCTime) ZonedTime
zonedTime = iso toZoned fromZoned where

    {-# INLINE toZoned #-}
    toZoned :: (TimeZone, UTCTime) -> ZonedTime
    toZoned (tz, time) = ZonedTime (view (utcLocalTime tz) time) tz

    {-# INLINE fromZoned #-}
    fromZoned :: ZonedTime -> (TimeZone, UTCTime)
    fromZoned (ZonedTime lt tz) = (tz, review (utcLocalTime tz) lt)
| ekmett/thyme | src/Data/Thyme/LocalTime/Internal.hs | bsd-3-clause | 4,716 | 0 | 15 | 962 | 1,120 | 619 | 501 | 98 | 1 |
{-# LANGUAGE CPP #-}
import Control.Monad
import Data.IORef
import Control.Exception (SomeException, catch)
import Distribution.Simple
import Distribution.Simple.BuildPaths (autogenModulesDir)
import Distribution.Simple.InstallDirs as I
import Distribution.Simple.LocalBuildInfo as L
import qualified Distribution.Simple.Setup as S
import qualified Distribution.Simple.Program as P
import Distribution.Simple.Utils (createDirectoryIfMissingVerbose, rewriteFile)
import Distribution.PackageDescription
import Distribution.Text
import System.Environment
import System.Exit
import System.FilePath ((</>), splitDirectories,isAbsolute)
import System.Directory
import qualified System.FilePath.Posix as Px
import System.Process
import qualified Data.Text as T
import qualified Data.Text.IO as TIO
-- After Idris is built, we need to check and install the prelude and other libs
-- -----------------------------------------------------------------------------
-- Idris Command Path
-- make on mingw32 exepects unix style separators
#ifdef mingw32_HOST_OS
-- Windows builds drive make through MSYS, which wants POSIX separators.
(<//>) = (Px.</>)
idrisCmd local = Px.joinPath $ splitDirectories $ ".." <//> ".." <//> buildDir local <//> "idris" <//> "idris"
#else
-- Relative path from a library subdirectory back to the freshly built
-- idris executable inside dist/build.
idrisCmd local = ".." </> ".." </> buildDir local </> "idris" </> "idris"
#endif
-- -----------------------------------------------------------------------------
-- Make Commands

-- use GNU make on FreeBSD/DragonFly, where "make" is BSD make
#if defined(freebsd_HOST_OS) || defined(dragonfly_HOST_OS)
mymake = "gmake"
#else
mymake = "make"
#endif
-- | Run make with the given arguments at the given verbosity.
make verbosity =
   P.runProgramInvocation verbosity . P.simpleProgramInvocation mymake
-- -----------------------------------------------------------------------------
-- Flags
-- | Look up a boolean cabal configuration flag, returning @def@ when
-- the flag was not set. Factors out the lookup-with-default logic that
-- was previously duplicated across the three flag accessors below.
lookupBoolFlag :: String -> Bool -> S.ConfigFlags -> Bool
lookupBoolFlag flag def flags =
  case lookup (FlagName flag) (S.configConfigurationsFlags flags) of
    Just b  -> b
    Nothing -> def

-- | Whether to link against GMP (default: True).
usesGMP :: S.ConfigFlags -> Bool
usesGMP = lookupBoolFlag "gmp" True

-- | Whether this is a release build (default: False).
isRelease :: S.ConfigFlags -> Bool
isRelease = lookupBoolFlag "release" False

-- | Whether to build a freestanding (relocatable) Idris (default: False).
isFreestanding :: S.ConfigFlags -> Bool
isFreestanding = lookupBoolFlag "freestanding" False
-- -----------------------------------------------------------------------------
-- Clean
-- | postClean hook: also clean the bundled standard-library tree.
idrisClean _ flags _ _ = do
      cleanStdLib
   where
      verbosity = S.fromFlag $ S.cleanVerbosity flags
      cleanStdLib = do
            makeClean "libs"
      makeClean dir = make verbosity [ "-C", dir, "clean", "IDRIS=idris" ]
-- -----------------------------------------------------------------------------
-- Configure
-- | The short git hash of HEAD, or "PRE" when git is unavailable or
-- this is not a checkout; trailing newline is stripped.
gitHash :: IO String
gitHash = do h <- Control.Exception.catch (readProcess "git" ["rev-parse", "--short", "HEAD"] "")
                  (\e -> let e' = (e :: SomeException) in return "PRE")
             return $ takeWhile (/= '\n') h
-- Put the Git hash into a module for use in the program.
-- For release builds, just put the empty string in the module.
generateVersionModule verbosity dir release = do
    hash <- gitHash
    let versionModulePath = dir </> "Version_idris" Px.<.> "hs"
    putStrLn $ "Generating " ++ versionModulePath ++
             if release then " for release" else (" for prerelease " ++ hash)
    createDirectoryIfMissingVerbose verbosity True dir
    rewriteFile versionModulePath (versionModuleContents hash)

  where versionModuleContents h = "module Version_idris where\n\n" ++
                                  "gitHash :: String\n" ++
                                  if release
                                    then "gitHash = \"\"\n"
                                    else "gitHash = \"-git:" ++ h ++ "\"\n"
-- Generate a module that contains the lib path for a freestanding Idris.
-- An absolute target path is baked in verbatim; a relative one is
-- resolved at runtime against the executable's own directory.
generateTargetModule verbosity dir targetDir = do
    absPath <- return $ isAbsolute targetDir
    let targetModulePath = dir </> "Target_idris" Px.<.> "hs"
    putStrLn $ "Generating " ++ targetModulePath
    createDirectoryIfMissingVerbose verbosity True dir
    rewriteFile targetModulePath (versionModuleContents absPath targetDir)
            where versionModuleContents absolute td = "module Target_idris where\n\n" ++
                                    "import System.FilePath\n" ++
                                    "import System.Environment\n" ++
                                    "getDataDir :: IO String\n" ++
                                    if absolute
                                        then "getDataDir = return \"" ++ td ++ "\"\n"
                                        else "getDataDir = do \n" ++
                                             "   expath <- getExecutablePath\n" ++
                                             "   execDir <- return $ dropFileName expath\n" ++
                                             "   return $ execDir ++ \"" ++ td ++ "\"\n"
                                    ++ "getDataFileName :: FilePath -> IO FilePath\n"
                                    ++ "getDataFileName name = do\n"
                                    ++ "   dir <- getDataDir\n"
                                    ++ "   return (dir ++ \"/\" ++ name)"
-- | postConf hook: clean the RTS, generate the git-hash module, and —
-- for freestanding builds — bake in the install dir from the
-- IDRIS_INSTALL_DIR environment variable.
idrisConfigure _ flags _ local = do
      configureRTS
      generateVersionModule verbosity (autogenModulesDir local) (isRelease (configFlags local))
      when (isFreestanding $ configFlags local) (do
                targetDir <- lookupEnv "IDRIS_INSTALL_DIR"
                case targetDir of
                     Just d -> generateTargetModule verbosity (autogenModulesDir local) d
                     Nothing -> error $ "Trying to build freestanding without a target directory."
                                    ++ " Set it by defining IDRIS_INSTALL_DIR.")
   where
      verbosity = S.fromFlag $ S.configVerbosity flags
      version = pkgVersion . package $ localPkgDescr local

      -- This is a hack. I don't know how to tell cabal that a data file needs
      -- installing but shouldn't be in the distribution. And it won't make the
      -- distribution if it's not there, so instead I just delete
      -- the file after configure.
      configureRTS = make verbosity ["-C", "rts", "clean"]
-- | preSDist hook: generate placeholder Version/Target modules so the
-- source distribution is self-contained.
idrisPreSDist args flags = do
  let dir = S.fromFlag (S.sDistDirectory flags)
  let verb = S.fromFlag (S.sDistVerbosity flags)
  generateVersionModule verb ("src") True
  generateTargetModule verb "src" "./libs"
  preSDist simpleUserHooks args flags
-- | postSDist hook: remove the generated modules again (best-effort —
-- failures to delete are deliberately ignored).
idrisPostSDist args flags desc lbi = do
  Control.Exception.catch (do let file = "src" </> "Version_idris" Px.<.> "hs"
                              let targetFile = "src" </> "Target_idris" Px.<.> "hs"
                              putStrLn $ "Removing generated modules:\n "
                                        ++ file ++ "\n" ++ targetFile
                              removeFile file
                              removeFile targetFile)
             (\e -> let e' = (e :: SomeException) in return ())
  postSDist simpleUserHooks args flags desc lbi
-- -----------------------------------------------------------------------------
-- Build
-- | Expose the git hash to the build as a CPP define (-DVERSION=...).
getVersion :: Args -> S.BuildFlags -> IO HookedBuildInfo
getVersion args flags = do
      hash <- gitHash
      let buildinfo = (emptyBuildInfo { cppOptions = ["-DVERSION="++hash] }) :: BuildInfo
      return (Just buildinfo, [])
-- | postBuild hook: build the standard libraries (with the idris we
-- just built) and the C run-time system.
idrisBuild _ flags _ local = do
      buildStdLib
      buildRTS
   where
      verbosity = S.fromFlag $ S.buildVerbosity flags
      buildStdLib = do
            putStrLn "Building libraries..."
            makeBuild "libs"
         where
            makeBuild dir = make verbosity [ "-C", dir, "build" , "IDRIS=" ++ idrisCmd local]
      -- pass GMP=-DIDRIS_GMP only when the gmp cabal flag is on
      buildRTS = make verbosity (["-C", "rts", "build"] ++
                                   gmpflag (usesGMP (configFlags local)))
      gmpflag False = []
      gmpflag True = ["GMP=-DIDRIS_GMP"]
-- -----------------------------------------------------------------------------
-- Copy/Install
-- | Install the standard libraries and the RTS under the package's
-- data directory. Used by both the copy and install hooks (see main).
idrisInstall verbosity copy pkg local = do
      installStdLib
      installRTS
   where
      target = datadir $ L.absoluteInstallDirs pkg local copy
      installStdLib = do
            putStrLn $ "Installing libraries in " ++ target
            makeInstall "libs" target
      installRTS = do
            let target' = target </> "rts"
            putStrLn $ "Installing run time system in " ++ target'
            makeInstall "rts" target'
      makeInstall src target =
            make verbosity [ "-C", src, "install" , "TARGET=" ++ target, "IDRIS=" ++ idrisCmd local]
-- -----------------------------------------------------------------------------
-- Main
-- Install libraries during both copy and install
-- See http://hackage.haskell.org/trac/hackage/ticket/718
-- | Standard Cabal main extended with Idris-specific hooks.
--
-- Libraries are installed during both copy and install; see
-- http://hackage.haskell.org/trac/hackage/ticket/718
main = defaultMainWithHooks idrisHooks
  where
    idrisHooks = simpleUserHooks
      { postClean = idrisClean
      , postConf  = idrisConfigure
      , postBuild = idrisBuild
      , postCopy  = \_ flags pkg local ->
          idrisInstall (S.fromFlag $ S.copyVerbosity flags)
                       (S.fromFlag $ S.copyDest flags) pkg local
      , postInst  = \_ flags pkg local ->
          idrisInstall (S.fromFlag $ S.installVerbosity flags)
                       NoCopyDest pkg local
      , preSDist  = idrisPreSDist
      , postSDist = idrisPostSDist
      }
| andyarvanitis/Idris-dev | Setup.hs | bsd-3-clause | 9,391 | 0 | 17 | 2,629 | 1,831 | 955 | 876 | 154 | 3 |
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE UnicodeSyntax #-}
module CoinExchange.Models
(
Entities.migrateAll
, MonadDB
, Doge
, ValidationError(..)
, Name
, name
, registerDoge
, richDoges
, dogeAssets
) where
import CoinExchange.Entities as Entities
import qualified Control.Lens as Lens
import Control.Monad.IO.Class (MonadIO)
import Control.Monad.Trans.Reader (ReaderT)
import qualified Data.Maybe as Maybe
import qualified Data.Ratio as Ratio
import qualified Data.Text as T
import Database.Esqueleto
import qualified Database.Persist.Postgresql as PG
-- * Doge Names
--
-- Define a type @Name@ and smart constructor @name@ for doge names.
--
-- The smart constructor validates that doges attempting to join the exchange
-- are not in fact cats in disguise.
-- | A doge name that passed validation; build one with the 'name'
-- smart constructor.
newtype Name = Name T.Text deriving (Show)
-- | Why a proposed name was rejected.
data ValidationError = EmptyNameError | SneakyCatError deriving (Show)
-- | Smart constructor for 'Name': strips surrounding whitespace, then
-- rejects empty names and the literal name @cat@ (no cats in disguise).
name ∷ T.Text → Either ValidationError Name
name raw
  | stripped == ""    = Left EmptyNameError
  | stripped == "cat" = Left SneakyCatError
  | otherwise         = Right (Name stripped)
  where
    stripped = T.strip raw
-- * Type aliases
--
-- Use existential types to hide some unneeded type variables.
-- | A database action runnable in any monad with IO at the base.
type MonadDB a = ∀ m. MonadIO m ⇒ ReaderT PG.SqlBackend m a
-- | An esqueleto query, likewise polymorphic in the base monad.
type Query a = ∀ m. MonadIO m ⇒ SqlPersistT m a
-- * Queries
-- | Persist a freshly validated doge together with its name row,
-- returning both generated keys.
registerDoge ∷ Name → MonadDB (Key Doge, Key DogeName)
registerDoge (Name txt) = do
  dogeKey ← PG.insert Doge
  nameKey ← PG.insert (DogeName dogeKey txt)
  pure (dogeKey, nameKey)
-- | Sum of coins across all of a doge's wallets.
newtype TotalCoins = TotalCoins Integer deriving (Show)
-- | Numeric database identifier of a doge.
newtype Id = Id Integer deriving (Show)
-- | Per-doge totals over the wallet table: one row per doge id with the
-- sum of coins across all of that doge's wallets.
dogeAssets ∷ MonadIO m ⇒ ReaderT SqlBackend m [(Id, TotalCoins)]
dogeAssets = do
  rows ←
    select $ from $ \wallet → do
      groupBy (wallet ^. WalletDogeId)
      pure (wallet ^. WalletDogeId, sum_ (wallet ^. WalletCoins))
  pure (map convertRow rows)
  where
    convertRow = Lens.bimap keyToId totalToCoins
    keyToId = Id . toInteger . unSqlBackendKey . unDogeKey . unValue
    -- 'sum_' yields Nothing for an empty group; treat that as zero coins.
    totalToCoins = TotalCoins . Ratio.numerator . Maybe.fromMaybe 0 . unValue
type MinCoins = Int
-- | Names of every doge owning at least one wallet that holds at least
-- the given number of coins.
richDoges ∷ MinCoins → Query [Name]
richDoges threshold = do
  rows ←
    select $ distinct $
      from $ \(dogeName `InnerJoin` wallet) → do
        on (wallet ^. WalletDogeId ==. dogeName ^. DogeNameDogeId)
        where_ (wallet ^. WalletCoins >=. val threshold)
        pure dogeName
  pure (map (Name . dogeNameDogeName . entityVal) rows)
| chrisbarrett/haskell-databases-talk | src/CoinExchange/Models.hs | bsd-3-clause | 2,725 | 0 | 17 | 735 | 699 | 384 | 315 | 63 | 3 |
module Main where
import MemoryFS
import System.Fuse.Box
import Control.Concurrent.STM.TVar
import qualified Data.HashMap.Lazy as Map
import Data.HashMap.Lazy ( HashMap )
-- | The backing store: a transactional map from fuse 'Node' to that
-- node's file contents.
-- NOTE(review): 'ByteString' has no visible import in this file;
-- presumably it is re-exported by "System.Fuse.Box" — confirm.
type MemoryMapTVar = TVar (HashMap Node ByteString)
-- | An in-memory filesystem is just its node-contents map.
newtype MemoryFS = MemoryFS MemoryMapTVar
-- | Placeholder entry point; prints a greeting and exits.
main :: IO ()
main = putStrLn "hello world"
| RobertFischer/fusebox | MemoryFS/Main.hs | bsd-3-clause | 317 | 0 | 7 | 46 | 86 | 52 | 34 | 11 | 1 |
{-# LANGUAGE LambdaCase #-}
-- This file is part of Hoppy.
--
-- Copyright 2015-2016 Bryan Gardiner <[email protected]>
--
-- This program is free software: you can redistribute it and/or modify
-- it under the terms of the GNU Affero General Public License as published by
-- the Free Software Foundation, either version 3 of the License, or
-- (at your option) any later version.
--
-- This program is distributed in the hope that it will be useful,
-- but WITHOUT ANY WARRANTY; without even the implied warranty of
-- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-- GNU Affero General Public License for more details.
--
-- You should have received a copy of the GNU Affero General Public License
-- along with this program. If not, see <http://www.gnu.org/licenses/>.
import Foreign.Hoppy.Generator.Main (run)
import System.Environment (getArgs)
import System.Exit (exitFailure)
import Foreign.Hoppy.Generator.Spec
import Foreign.Hoppy.Generator.Std (mod_std)
import Graphics.UI.Cocos2d.Generator.Interface.Common
import Graphics.UI.Cocos2d.Generator.Interface.Texture
import Graphics.UI.Cocos2d.Generator.Interface.Director
import Graphics.UI.Cocos2d.Generator.Interface.Node
import Graphics.UI.Cocos2d.Generator.Interface.Layer
import Graphics.UI.Cocos2d.Generator.Interface.Scene
import Graphics.UI.Cocos2d.Generator.Interface.Sprite
import Graphics.UI.Cocos2d.Generator.Interface.Event
import Graphics.UI.Cocos2d.Generator.Interface.Label
import Graphics.UI.Cocos2d.Generator.Interface.Widget
import Graphics.UI.Cocos2d.Generator.Interface.Audio
import Graphics.UI.Cocos2d.Generator.Interface.CocoStudio
import Graphics.UI.Cocos2d.Generator.Interface.Utils
import Control.Monad
main :: IO ()
main = case interfaceResult of
Left errorMsg -> do
putStrLn $ "Error initializing interface: " ++ errorMsg
exitFailure
Right iface -> do
args <- getArgs
void $ run [iface] args
-- | The cocos2d interface: every generator module, registered under the
-- @Graphics.UI.Cocos2d@ Haskell module hierarchy.
interfaceResult :: Either String Interface
interfaceResult =
  interface "cocos2d" generatorModules >>=
  interfaceAddHaskellModuleBase ["Graphics", "UI", "Cocos2d"]
  where
    generatorModules =
      [ mod_std
      , mod_common
      , mod_texture
      , mod_director
      , mod_node
      , mod_layer
      , mod_scene
      , mod_sprite
      , mod_event
      , mod_label
      , mod_widget
      , mod_audio
      , mod_cocostudio
      , mod_utils
      ]
| lynnard/cocos2d-hs | generator/Main.hs | bsd-3-clause | 2,305 | 0 | 12 | 342 | 342 | 225 | 117 | 46 | 2 |
{-# LANGUAGE MultiParamTypeClasses, FlexibleInstances, FunctionalDependencies #-}
-- |Monadic Iteratees:
-- incremental input parsers, processors and transformers
--
-- Support for IO enumerators
module Data.Yteratee.IO.ReadableChunk (
ReadableChunk (..)
)
where
import Prelude hiding (head, tail, dropWhile, length, splitAt )
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as L
import Data.Word
import Control.Monad.IO.Class
import Foreign.C
import Foreign.Ptr
import Foreign.Storable
import Foreign.Marshal.Array
-- |Class of streams which can be filled from a 'Ptr'. Typically these
-- are streams which can be read from a file, @Handle@, or similar resource.
--
--
-- | Stream types that can be filled from a raw pointer.
class (Storable el) => ReadableChunk s el | s -> el where
  -- | Read data from the buffer. The Int parameter is the length of the
  -- data in *bytes*; the pointer must not be used after 'readFromPtr'
  -- completes.
  readFromPtr :: MonadIO m => Ptr el -> Int -> m s
-- Strings are decoded from the buffer as 8-bit C characters.
instance ReadableChunk [Char] Char where
  readFromPtr buf len = liftIO (peekCAStringLen (castPtr buf, len))

instance ReadableChunk [Word8] Word8 where
  readFromPtr buf len = liftIO (peekArray len buf)

-- NOTE(review): the class documents the Int as a *byte* count, but for
-- the wider word types below it is passed to 'peekArray' as an element
-- count — confirm which is intended.
instance ReadableChunk [Word16] Word16 where
  readFromPtr buf len = liftIO (peekArray len buf)

instance ReadableChunk [Word32] Word32 where
  readFromPtr buf len = liftIO (peekArray len buf)

instance ReadableChunk [Word] Word where
  readFromPtr buf len = liftIO (peekArray len buf)

instance ReadableChunk B.ByteString Word8 where
  readFromPtr buf len = liftIO (B.packCStringLen (castPtr buf, len))

-- A lazy ByteString is produced as a single strict chunk.
instance ReadableChunk L.ByteString Word8 where
  readFromPtr buf len =
    liftIO (fmap (L.fromChunks . (: [])) (readFromPtr buf len))
| ierton/yteratee | src/Data/Yteratee/IO/ReadableChunk.hs | bsd-3-clause | 1,682 | 0 | 10 | 297 | 409 | 227 | 182 | 33 | 0 |
module Data.List.CommonSubstring where
import Data.SuffixTree
import Data.List
import Data.Ord
-- | This is the suffixtree based implementation.
-- | If there are multiple longest substrings, which one is returned
-- | is undefined.
-- | Longest common substring of the two lists, computed by matching every
-- suffix of the first list against a suffix tree of the second.
-- If there are multiple longest substrings, which one is returned is
-- undefined.
longestSubstring :: (Eq a, Ord a) => [a] -> [a] -> [a]
longestSubstring first second =
    maximumBy (comparing length)
      (map (longestMatch (construct second)) (tails first))
  where
    longestMatch :: Eq a => STree a -> [a] -> [a]
    longestMatch Leaf _ = []
    longestMatch (Node edges) candidate =
      maximumBy (comparing length) (map (prefixMatch candidate) edges)

    prefixMatch :: Eq a => [a] -> Edge a -> [a]
    prefixMatch candidate (p, tree)
      | p' `isPrefixOf` candidate =
          p' ++ longestMatch tree (drop (length p') candidate)
      | otherwise = commonPrefix p' candidate
      where
        p' = prefix p

    -- Longest common prefix of two lists.
    commonPrefix (a:as) (b:bs)
      | a == b    = a : commonPrefix as bs
      | otherwise = []
    commonPrefix _ _ = []
| mwotton/string-similarity | Data/List/CommonSubstring.hs | bsd-3-clause | 1,096 | 0 | 13 | 334 | 358 | 183 | 175 | 21 | 3 |
{-- snippet all --}
-- | Divide the numerator by every element of the list.
divBy :: Integral a => a -> [a] -> [a]
divBy numerator denominators = [numerator `div` d | d <- denominators]
{-- /snippet all --}
| binesiyu/ifl | examples/ch19/divby1.hs | mit | 120 | 0 | 8 | 24 | 46 | 26 | 20 | 2 | 1 |
-------------------------------------------------------------------------------
-- Linear implementation of Sets. Nodes are sorted and non-repeated
--
-- Data Structures. Grado en Informática. UMA.
-- Pepe Gallardo, 2012
-------------------------------------------------------------------------------
module DataStructures.Set.SortedLinearSet
( Set
, empty
, isEmpty
, size
, insert
, isElem
, delete
, fold
, union
, intersection
, difference
) where
import Data.List(intercalate)
import Test.QuickCheck
data Set a = Empty | Node a (Set a)
-- | The set with no elements.
empty :: Set a
empty = Empty
-- | True iff the set contains no elements. O(1).
isEmpty :: Set a -> Bool
isEmpty Empty = True
isEmpty _ = False
-- | Insert an element, keeping the chain sorted and duplicate-free.
insert :: (Ord a) => a -> Set a -> Set a
insert x Empty = Node x Empty
insert x set@(Node y rest) =
  case compare x y of
    LT -> Node x set
    EQ -> set
    GT -> Node y (insert x rest)
-- | Membership test; stops early thanks to the sorted order.
isElem :: (Ord a) => a -> Set a -> Bool
isElem _ Empty = False
isElem x (Node y rest) =
  case compare x y of
    LT -> False
    EQ -> True
    GT -> isElem x rest
-- | Remove an element if present; the set is returned unchanged otherwise.
delete :: (Ord a) => a -> Set a -> Set a
delete _ Empty = Empty
delete x set@(Node y rest) =
  case compare x y of
    LT -> set
    EQ -> rest
    GT -> Node y (delete x rest)
-- | Number of elements, via an accumulating traversal.
size :: Set a -> Int
size = go 0
  where
    go acc Empty = acc
    go acc (Node _ rest) = go (acc + 1) rest
-- | Right fold over the elements in ascending order.
fold :: (a -> b -> b) -> b -> Set a -> b
fold _ z Empty = z
fold f z (Node x rest) = f x (fold f z rest)
-- | Union: insert every element of the second set into the first.
union :: (Ord a) => Set a -> Set a -> Set a
union = fold insert
-- | Difference: remove every element of the second set from the first.
difference :: (Ord a) => Set a -> Set a -> Set a
difference = fold delete
-- | Intersection: keep only those elements of the second set that also
-- occur in the first.
intersection :: (Ord a) => Set a -> Set a -> Set a
intersection s s' = fold keep empty s'
  where
    keep x acc
      | isElem x s = insert x acc
      | otherwise  = acc
-- Showing a set
instance (Show a) => Show (Set a) where
  show set = "SortedLinearSet(" ++ intercalate "," (elems set) ++ ")"
    where
      elems Empty = []
      elems (Node x rest) = show x : elems rest
-- Set equality: same elements in the same (sorted) order.
instance (Eq a) => Eq (Set a) where
  Empty == Empty = True
  (Node x s) == (Node y t) = x == y && s == t
  _ == _ = False
-- This instance is used by QuickCheck to generate random sets
instance (Ord a, Arbitrary a) => Arbitrary (Set a) where
  arbitrary = fmap (foldr insert empty) (listOf arbitrary)
| Saeron/haskell | data.structures/haskell/DataStructures/Set/SortedLinearSet.hs | apache-2.0 | 2,368 | 0 | 10 | 756 | 968 | 485 | 483 | 62 | 2 |
-----------------------------------------------------------------------------
-- |
-- Module : Network.Hackage.CabalInstall.Types
-- Copyright : (c) David Himmelstrup 2005
-- License : BSD-like
--
-- Maintainer : [email protected]
-- Stability : provisional
-- Portability : portable
--
-- All data types for the entire cabal-install system gathered here to avoid some .hs-boot files.
-----------------------------------------------------------------------------
module Network.Hackage.CabalInstall.Types where
import Distribution.Setup (CompilerFlavor(..),Compiler)
import Distribution.Package (PackageIdentifier)
import Distribution.Version (Dependency)
import System.IO (Handle)
-- | Package metadata as listed by a Hackage server.
data PkgInfo = PkgInfo
  { infoId       :: PackageIdentifier -- ^ Name and version.
  , infoDeps     :: [Dependency]      -- ^ Declared dependencies.
  , infoSynopsis :: String
  , infoURL      :: String
  } deriving (Show, Read, Eq)
-- | The top-level commands understood by cabal-install.
data Action
  = FetchCmd
  | InstallCmd
  | BuildDepCmd
  | CleanCmd
  | UpdateCmd
  | InfoCmd
  | HelpCmd
  | ListCmd
-- | Raw command-line flags, prior to defaulting and validation.
data TempFlags = TempFlags
  { tempHcFlavor :: Maybe CompilerFlavor
  , tempHcPath   :: Maybe FilePath       -- ^ Given compiler location.
  , tempConfPath :: Maybe FilePath
  , tempHcPkg    :: Maybe FilePath       -- ^ Given hc-pkg location.
  , tempPrefix   :: Maybe FilePath
  , tempServers  :: [String]             -- ^ Available Hackage servers.
  , tempTarPath  :: Maybe FilePath
  , tempRunHc    :: Maybe FilePath
  , tempVerbose  :: Int                  -- ^ Verbosity level.
  -- , tempUpgradeDeps :: Bool
  , tempUser     :: Bool                 -- ^ --user flag.
  , tempUserIns  :: Bool                 -- ^ --user-install flag.
  }
-- | Fully resolved configuration used when running a command.
data ConfigFlags = ConfigFlags
  { configCompiler  :: Compiler
  , configConfPath  :: FilePath
  , configPrefix    :: Maybe FilePath
  , configServers   :: [String]        -- ^ Available Hackage servers.
  , configTarPath   :: FilePath
  , configRunHc     :: FilePath
  , configOutputGen :: OutputGen
  , configVerbose   :: Int
  -- , configUpgradeDeps :: Bool
  , configUser      :: Bool            -- ^ --user flag.
  , configUserIns   :: Bool            -- ^ --user-install flag.
  }
-- | Individual command-line switches.
data Flag
  = GhcFlag
  | NhcFlag
  | HugsFlag
  | WithCompiler FilePath
  | WithHcPkg FilePath
  | WithConfPath FilePath
  | WithTarPath FilePath
  | WithServer String
  | UserFlag
  | GlobalFlag
  | UserInstallFlag
  | GlobalInstallFlag
  -- | UpgradeDeps
  | HelpFlag
  | Verbose Int
-- | Callbacks used to report the progress of the various commands.
data OutputGen = OutputGen
  { prepareInstall  :: [(PackageIdentifier, [String], String)] -> IO ()
  , pkgIsPresent    :: PackageIdentifier -> IO ()
  , downloadingPkg  :: PackageIdentifier -> IO ()
  , executingCmd    :: String -> [String] -> IO ()
  , cmdFailed       :: String -> [String] -> Int -> IO () -- ^ Cmd, flags and errno.
  , buildingPkg     :: PackageIdentifier -> IO () -- ^ Package is fetched and unpacked; starting installation.
  , stepConfigPkg   :: PackageIdentifier -> IO ()
  , stepBuildPkg    :: PackageIdentifier -> IO ()
  , stepInstallPkg  :: PackageIdentifier -> IO ()
  , stepFinishedPkg :: PackageIdentifier -> IO ()
  , noSetupScript   :: PackageIdentifier -> IO ()
  , noCabalFile     :: PackageIdentifier -> IO ()
  , gettingPkgList  :: String -> IO () -- ^ Server.
    -- | Show information about a package available from a server.
  , showPackageInfo :: Maybe FilePath -- ^ Pkg file if fetched.
                    -> Bool          -- ^ Is installed.
                    -> [String]      -- ^ Options.
                    -> Dependency    -- ^ Which dependency this package is supposed to fill.
                    -> (PackageIdentifier, String, [ResolvedPackage])
                    -> IO ()
    -- | Show a package which isn't available from any server.
  , showOtherPackageInfo :: Maybe PackageIdentifier -- ^ Package if installed.
                         -> Dependency
                         -> IO ()
  , cmdStdout :: Maybe Handle
  , cmdStderr :: Maybe Handle
  }
-- | A dependency together with the package chosen to satisfy it, if any.
data ResolvedPackage = ResolvedPackage
  { fulfilling   :: Dependency -- ^ The dependency being satisfied.
  , resolvedData :: Maybe ( PackageIdentifier -- Pkg id.
                          , String            -- Pkg location.
                          , [ResolvedPackage] -- Pkg dependencies.
                          )
  , pkgOptions   :: [String]
  } deriving Eq
-- | A requested dependency that has not yet been resolved to a package.
data UnresolvedDependency = UnresolvedDependency
  { dependency :: Dependency
  , depOptions :: [String]
  }
| alekar/hugs | packages/Cabal/Network/Hackage/CabalInstall/Types.hs | bsd-3-clause | 4,541 | 0 | 15 | 1,441 | 800 | 484 | 316 | 90 | 0 |
-- |
-- Analysis functions over the Cil AST.
--
module Language.Cil.Analysis (
opcodes
) where
import Language.Cil.Syntax
class Ast a where
-- A concatenated list of all opcodes.
opcodes :: a -> [OpCode]
instance Ast Assembly where
opcodes (Assembly _ _ td) = concatMap opcodes td
instance Ast TypeDef where
opcodes (Class _ _ _ _ cd) = concatMap opcodes cd
opcodes (GenericClass _ _ _ cd) = concatMap opcodes cd
instance Ast ClassDecl where
opcodes (FieldDef _) = []
opcodes (MethodDef md) = opcodes md
opcodes (TypeDef td) = opcodes td
instance Ast MethodDef where
opcodes (Constructor _ _ _ md) = [ o | OpCode o <- md ]
opcodes (Method _ _ _ _ md) = [ o | OpCode o <- md ]
| tomlokhorst/language-cil | src/Language/Cil/Analysis.hs | bsd-3-clause | 722 | 0 | 9 | 174 | 270 | 138 | 132 | 17 | 0 |
import Prelude
import FFI
-- | Demonstrates list pattern matching: @[1,2]@ matches the two-element
-- branch, so this logs "got two values.".
-- NOTE(review): the match is not exhaustive — lists of three or more
-- elements have no branch.
main :: Fay ()
main =
  case [1,2] of
    [] -> alert "got []"
    [a] -> alert "got one value."
    [a,b] -> alert "got two values."
-- | Log a message to the browser console via the Fay FFI.
alert :: String -> Fay ()
alert = ffi "console.log(%1)"
| fpco/fay | examples/pat.hs | bsd-3-clause | 240 | 0 | 8 | 78 | 92 | 48 | 44 | 10 | 3 |
{-# OPTIONS -Wall #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
-- | Date/time showing functions.
module Data.Time.Show
(showDateTime)
where
import Data.Time (FormatTime,formatTime)
import System.Locale (defaultTimeLocale)
-- | Render a timestamp with the pattern @%F %T %Z@ (ISO date, time of
-- day, time-zone name).
showDateTime :: FormatTime t => t -> String
showDateTime = formatTime defaultTimeLocale "%F %T %Z"
| plow-technologies/ircbrowse | src/Data/Time/Show.hs | bsd-3-clause | 372 | 0 | 6 | 58 | 70 | 41 | 29 | 9 | 1 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE RankNTypes #-}
#ifndef MIN_VERSION_base
#define MIN_VERSION_base(x,y,z) 1
#endif
-----------------------------------------------------------------------------
-- |
-- Module : Control.Applicative.Free.Final
-- Copyright : (C) 2012-2013 Edward Kmett
-- License : BSD-style (see the file LICENSE)
--
-- Maintainer : Edward Kmett <[email protected]>
-- Stability : provisional
-- Portability : GADTs, Rank2Types
--
-- Final encoding of free 'Applicative' functors.
----------------------------------------------------------------------------
module Control.Applicative.Free.Final
(
-- | Compared to the free monad, they are less expressive. However, they are also more
-- flexible to inspect and interpret, as the number of ways in which
-- the values can be nested is more limited.
Ap(..)
, runAp
, runAp_
, liftAp
, hoistAp
, retractAp
-- * Examples
-- $examples
) where
import Control.Applicative
import Data.Functor.Apply
#if !(MIN_VERSION_base(4,8,0))
import Data.Monoid
#endif
-- | The free 'Applicative' for a 'Functor' @f@.
--
-- Church\/final encoding: a value is a function that, given a way to
-- interpret each @f x@ into some target 'Applicative' @g@, produces the
-- whole computation in @g@.
newtype Ap f a = Ap { _runAp :: forall g. Applicative g => (forall x. f x -> g x) -> g a }
-- | Given a natural transformation from @f@ to @g@, this gives a
-- canonical monoidal natural transformation from @'Ap' f@ to @g@.
--
-- prop> runAp t == retractApp . hoistApp t
runAp :: Applicative g => (forall x. f x -> g x) -> Ap f a -> g a
runAp phi (Ap g) = g phi
-- | Perform a monoidal analysis over a free applicative value.
--
-- Example:
--
-- @
-- count :: Ap f a -> Int
-- count = getSum . runAp_ (\\_ -> Sum 1)
-- @
runAp_ :: Monoid m => (forall a. f a -> m) -> Ap f b -> m
runAp_ f x = getConst (runAp (Const . f) x)
-- Under the church encoding, mapping is post-composition with @fmap@.
instance Functor (Ap f) where
  fmap h (Ap g) = Ap (\interp -> h <$> g interp)

instance Apply (Ap f) where
  Ap g <.> Ap h = Ap (\interp -> g interp <*> h interp)

instance Applicative (Ap f) where
  pure a = Ap (\_ -> pure a)
  Ap g <*> Ap h = Ap (\interp -> g interp <*> h interp)
-- | A version of 'lift' that can be used with just a 'Functor' for @f@.
liftAp :: f a -> Ap f a
liftAp x = Ap (\interp -> interp x)
-- | Given a natural transformation from @f@ to @g@ this gives a monoidal
-- natural transformation from @Ap f@ to @Ap g@.
hoistAp :: (forall a. f a -> g a) -> Ap f b -> Ap g b
hoistAp nat (Ap g) = Ap (\interp -> g (interp . nat))
-- | Interprets the free applicative functor over @f@ using the semantics
-- for 'pure' and '<*>' given by the 'Applicative' instance for @f@.
--
-- prop> retractApp == runAp id
retractAp :: Applicative f => Ap f a -> f a
retractAp = runAp id
{- $examples
<examples/ValidationForm.hs Validation form>
-}
| dalaing/free | src/Control/Applicative/Free/Final.hs | bsd-3-clause | 2,631 | 0 | 13 | 560 | 602 | 326 | 276 | 30 | 1 |
{- |
At the ZuriHac 2016 I worked on the new parsec-based parser for the *.cabal files.
The obvious test case is to compare new and old parser results for all of Hackage.
Traversing the Hackage is quite trivial. The difficult part is inspecting
the result 'GenericPackageDescription's to spot the difference.
In the same event, Andres Löh showed his library @generics-sop@. Obvious choice
to quickly put something together for the repetetive task. After all you can
compare records field-wise. And if sum constructors are different, that's
enough for our case as well!
Generic programming ftw.
-}
{-# LANGUAGE CPP #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DefaultSignatures #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE GADTs #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE UndecidableInstances #-}
-- | TODO: package as a library? is this useful elsewhere?
module StructDiff where
import Control.Applicative (liftA2)
import Data.Align.Key (AlignWithKey (..))
import Data.Foldable (Foldable, fold, traverse_)
import Data.Key (Key)
import Data.List (intercalate)
import Data.Map (Map)
import Data.Monoid (Monoid (..), (<>))
import Data.Singletons.Bool (SBool (..), SBoolI (..), eqToRefl)
import Data.These (These (..))
import Data.Type.Equality
import Generics.SOP
-- | Because @'Data.Proxy.Proxy' :: 'Data.Proxy.Proxy' a@ is so long.
data P a = P
-------------------------------------------------------------------------------
-- Structure diffs
-------------------------------------------------------------------------------
-- | Each thunk has a path, removed and added "stuff"
data DiffThunk = DiffThunk { dtPath :: [String], dtA :: String, dtB :: String }
deriving Show
-- | Diff result is a collection of thunks
data DiffResult = DiffResult [DiffThunk]
deriving Show
prefixThunk :: String -> DiffThunk -> DiffThunk
prefixThunk pfx (DiffThunk path a b) = DiffThunk (pfx : path) a b
prefixResult :: String -> DiffResult -> DiffResult
prefixResult name (DiffResult thunks) = DiffResult $ map (prefixThunk name) thunks
-- | Pretty print a result
prettyResultIO :: DiffResult -> IO ()
prettyResultIO (DiffResult []) = putStrLn "Equal"
prettyResultIO (DiffResult xs) = traverse_ p xs
where
p (DiffThunk paths a b) = do
putStrLn $ intercalate " " paths ++ " : "
putStrLn $ "- " ++ a
putStrLn $ "+ " ++ b
-- | We can join diff results
instance Monoid DiffResult where
mempty = DiffResult mempty
mappend (DiffResult x) (DiffResult y) = DiffResult (mappend x y)
-- | And we have a class for things we can diff
class Diff a where
diff :: a -> a -> DiffResult
default diff
:: (Generic a, HasDatatypeInfo a, All2 Diff (Code a))
=> a -> a -> DiffResult
diff = gdiff
-- | And generic implementation!
gdiff :: forall a. (Generic a, HasDatatypeInfo a, All2 Diff (Code a)) => a -> a -> DiffResult
gdiff x y = gdiffS (constructorInfo (P :: P a)) (unSOP $ from x) (unSOP $ from y)
gdiffS :: All2 Diff xss => NP ConstructorInfo xss -> NS (NP I) xss -> NS (NP I) xss -> DiffResult
gdiffS (c :* _) (Z xs) (Z ys) = mconcat $ hcollapse $ hczipWith3 (P :: P Diff) f (fieldNames c) xs ys
where
f :: Diff a => K FieldName a -> I a -> I a -> K DiffResult a
f (K fieldName) x y = K . prefixResult fieldName . unI $ liftA2 diff x y
gdiffS (_ :* cs) (S xss) (S yss) = gdiffS cs xss yss
gdiffS cs xs ys = DiffResult [DiffThunk [] (constructorNameOf cs xs) (constructorNameOf cs ys)]
eqDiff :: (Eq a, Show a) => a -> a -> DiffResult
eqDiff x y
| x == y = DiffResult []
| otherwise = DiffResult [DiffThunk [] (show x) (show y)]
alignDiff
:: (Show (Key f), Show a, Diff a, AlignWithKey f, Foldable f)
=> f a -> f a -> DiffResult
alignDiff x y = fold $ alignWithKey (\k -> prefixResult (show k) . f) x y
where
f (These a b) = diff a b
f (This a) = DiffResult [DiffThunk [] (show a) "<none>"]
f (That b) = DiffResult [DiffThunk [] "<none>" (show b)]
instance Diff Char where diff = eqDiff
instance Diff Bool
instance Diff a => Diff (Maybe a)
instance Diff Int where diff = eqDiff
instance (Diff a, Diff b) => Diff (Either a b)
instance (Diff a, Diff b) => Diff (a, b) where
diff (a, b) (a', b') =
prefixResult "_1" (diff a a') <>
prefixResult "_2" (diff b b')
instance (Diff a, Diff b, Diff c) => Diff (a, b, c) where
diff (a, b, c) (a', b', c') =
prefixResult "_1" (diff a a') <>
prefixResult "_2" (diff b b') <>
prefixResult "_3" (diff c c')
instance (SBoolI (a == Char), Show a, Diff a) => Diff [a] where
diff = case sbool :: SBool (a == Char) of
STrue -> case eqToRefl :: a :~: Char of
Refl -> eqDiff
SFalse -> alignDiff
instance (Ord k, Show k, Diff v, Show v) => Diff (Map k v) where diff = alignDiff
-------------------------------------------------------------------------------
-- SOP helpers
-------------------------------------------------------------------------------
constructorInfo :: (HasDatatypeInfo a, xss ~ Code a) => proxy a -> NP ConstructorInfo xss
constructorInfo p = case datatypeInfo p of
ADT _ _ cs -> cs
Newtype _ _ c -> c :* Nil
constructorNameOf :: NP ConstructorInfo xss -> NS f xss -> ConstructorName
constructorNameOf (c :* _) (Z _) = constructorName c
constructorNameOf (_ :* cs) (S xs) = constructorNameOf cs xs
#if __GLASGOW_HASKELL__ < 800
constructorNameOf _ _ = error "Should never happen"
#endif
constructorName :: ConstructorInfo xs -> ConstructorName
constructorName (Constructor name) = name
constructorName (Infix name _ _) = "(" ++ name ++ ")"
constructorName (Record name _) = name
-- | This is a little lie.
fieldNames :: ConstructorInfo xs -> NP (K FieldName) xs
fieldNames (Constructor name) = hpure (K name) -- TODO: add .1 .2 etc.
fieldNames (Infix name _ _) = K ("-(" ++ name ++ ")") :* K ("(" ++ name ++ ")-") :* Nil
fieldNames (Record _ fis) = hmap (\(FieldInfo fieldName) -> K fieldName) fis
| mydaum/cabal | Cabal/tests/StructDiff.hs | bsd-3-clause | 6,371 | 0 | 12 | 1,511 | 2,006 | 1,044 | 962 | 103 | 3 |
{-# LANGUAGE FlexibleInstances, MultiParamTypeClasses, TypeSynonymInstances #-}
-----------------------------------------------------------------------------
-- |
-- Module : XMonad.Layout.Column
-- Copyright : (c) 2009 Ilya Portnov
-- License : BSD3-style (see LICENSE)
--
-- Maintainer : Ilya Portnov <[email protected]>
-- Stability : unstable
-- Portability : unportable
--
-- Provides Column layout that places all windows in one column. Windows
-- heights are calculated from equation: H1/H2 = H2/H3 = ... = q, where q is
-- given. With Shrink/Expand messages you can change the q value.
--
-----------------------------------------------------------------------------
module XMonad.Layout.Column (
-- * Usage
-- $usage
Column (..)
) where
import XMonad
import qualified XMonad.StackSet as W
-- $usage
-- This module defines a layout named Column. It places all windows in one
-- column. Windows heights are calculated from equation: H1/H2 = H2/H3 = ... =
-- q, where `q' is given (thus, windows heights are members of geometric
-- progression). With Shrink/Expand messages one can change the `q' value.
--
-- You can use this module by adding the following in your @xmonad.hs@:
--
-- > import XMonad.Layout.Column
--
-- Then add layouts to your layoutHook:
--
-- > myLayoutHook = Column 1.6 ||| ...
--
-- In this example, each successive window will have a height 1.6 times
-- smaller than the previous window.
data Column a = Column Float deriving (Read,Show)
-- Geometry is delegated to 'columnLayout', resizing to 'columnMessage'.
instance LayoutClass Column a where
  pureLayout = columnLayout
  pureMessage = columnMessage
-- | Handle Shrink\/Expand messages by nudging the ratio down or up
-- by 0.1.
columnMessage :: Column a -> SomeMessage -> Maybe (Column a)
columnMessage (Column q) msg = fmap adjust (fromMessage msg)
  where
    adjust Shrink = Column (q - 0.1)
    adjust Expand = Column (q + 0.1)
-- | Assign each window a horizontal slice of the screen: window k
-- (1-based, top to bottom) gets the height computed by 'xn', and is
-- offset vertically by the sum of the heights above it.
columnLayout :: Column a -> Rectangle -> W.Stack a -> [(a, Rectangle)]
columnLayout (Column q) rect stack = zip windows rectangles
  where
    windows    = W.integrate stack
    n          = length windows
    heights    = map (xn n rect q) [1 .. n]
    offsets    = [fromIntegral (sum (take k heights)) | k <- [0 .. n - 1]]
    rectangles = map (mkRect rect) (zip heights offsets)
-- | Carve a sub-rectangle of the given height at vertical offset @dy@
-- inside the screen rectangle, keeping the full width.
mkRect :: Rectangle -> (Dimension,Position) -> Rectangle
mkRect (Rectangle left top width _) (h, dy) =
  Rectangle left (top + fromIntegral dy) width h
-- | Height of the k-th of n windows within the given screen rectangle,
-- so that consecutive heights form a geometric progression in ratio @q@.
-- The @q == 1@ case (equal heights) is special-cased to avoid dividing
-- by zero in the geometric-series formula.
xn :: Int -> Rectangle -> Float -> Int -> Dimension
xn n (Rectangle _ _ _ h) q k
  | q == 1    = h `div` fromIntegral n
  | otherwise = round (fromIntegral h * q ^ (n - k) * (1 - q) / (1 - q ^ n))
| pjones/xmonad-test | vendor/xmonad-contrib/XMonad/Layout/Column.hs | bsd-2-clause | 2,640 | 0 | 13 | 679 | 545 | 304 | 241 | 26 | 2 |
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE CPP, NoImplicitPrelude, StandaloneDeriving #-}
{-# OPTIONS_HADDOCK hide #-}
-----------------------------------------------------------------------------
-- |
-- Module : GHC.Unicode
-- Copyright : (c) The University of Glasgow, 2003
-- License : see libraries/base/LICENSE
--
-- Maintainer : [email protected]
-- Stability : internal
-- Portability : non-portable (GHC extensions)
--
-- Implementations for the character predicates (isLower, isUpper, etc.)
-- and the conversions (toUpper, toLower). The implementation uses
-- libunicode on Unix systems if that is available.
--
-----------------------------------------------------------------------------
module GHC.Unicode (
GeneralCategory (..), generalCategory,
isAscii, isLatin1, isControl,
isAsciiUpper, isAsciiLower,
isPrint, isSpace, isUpper,
isLower, isAlpha, isDigit,
isOctDigit, isHexDigit, isAlphaNum,
isPunctuation, isSymbol,
toUpper, toLower, toTitle,
wgencat
) where
import GHC.Base
import GHC.Char (chr)
import GHC.Real
import GHC.Enum ( Enum (..), Bounded (..) )
import GHC.Arr ( Ix (..) )
import GHC.Num
-- Data.Char.chr already imports this and we need to define a Show instance
-- for GeneralCategory
import GHC.Show ( Show )
#include "HsBaseConfig.h"
-- | Unicode General Categories (column 2 of the UnicodeData table) in
-- the order they are listed in the Unicode standard (the Unicode
-- Character Database, in particular).
--
-- ==== __Examples__
--
-- Basic usage:
--
-- >>> :t OtherLetter
-- OtherLetter :: GeneralCategory
--
-- 'Eq' instance:
--
-- >>> UppercaseLetter == UppercaseLetter
-- True
-- >>> UppercaseLetter == LowercaseLetter
-- False
--
-- 'Ord' instance:
--
-- >>> NonSpacingMark <= MathSymbol
-- True
--
-- 'Enum' instance:
--
-- >>> enumFromTo ModifierLetter SpacingCombiningMark
-- [ModifierLetter,OtherLetter,NonSpacingMark,SpacingCombiningMark]
--
-- 'Read' instance:
--
-- >>> read "DashPunctuation" :: GeneralCategory
-- DashPunctuation
-- >>> read "17" :: GeneralCategory
-- *** Exception: Prelude.read: no parse
--
-- 'Show' instance:
--
-- >>> show EnclosingMark
-- "EnclosingMark"
--
-- 'Bounded' instance:
--
-- >>> minBound :: GeneralCategory
-- UppercaseLetter
-- >>> maxBound :: GeneralCategory
-- NotAssigned
--
-- 'Ix' instance:
--
-- >>> import Data.Ix ( index )
-- >>> index (OtherLetter,Control) FinalQuote
-- 12
-- >>> index (OtherLetter,Control) Format
-- *** Exception: Error in array index
--
data GeneralCategory
        = UppercaseLetter       -- ^ Lu: Letter, Uppercase
        | LowercaseLetter       -- ^ Ll: Letter, Lowercase
        | TitlecaseLetter       -- ^ Lt: Letter, Titlecase
        | ModifierLetter        -- ^ Lm: Letter, Modifier
        | OtherLetter           -- ^ Lo: Letter, Other
        | NonSpacingMark        -- ^ Mn: Mark, Non-Spacing
        | SpacingCombiningMark  -- ^ Mc: Mark, Spacing Combining
        | EnclosingMark         -- ^ Me: Mark, Enclosing
        | DecimalNumber         -- ^ Nd: Number, Decimal
        | LetterNumber          -- ^ Nl: Number, Letter
        | OtherNumber           -- ^ No: Number, Other
        | ConnectorPunctuation  -- ^ Pc: Punctuation, Connector
        | DashPunctuation       -- ^ Pd: Punctuation, Dash
        | OpenPunctuation       -- ^ Ps: Punctuation, Open
        | ClosePunctuation      -- ^ Pe: Punctuation, Close
        | InitialQuote          -- ^ Pi: Punctuation, Initial quote
        | FinalQuote            -- ^ Pf: Punctuation, Final quote
        | OtherPunctuation      -- ^ Po: Punctuation, Other
        | MathSymbol            -- ^ Sm: Symbol, Math
        | CurrencySymbol        -- ^ Sc: Symbol, Currency
        | ModifierSymbol        -- ^ Sk: Symbol, Modifier
        | OtherSymbol           -- ^ So: Symbol, Other
        | Space                 -- ^ Zs: Separator, Space
        | LineSeparator         -- ^ Zl: Separator, Line
        | ParagraphSeparator    -- ^ Zp: Separator, Paragraph
        | Control               -- ^ Cc: Other, Control
        | Format                -- ^ Cf: Other, Format
        | Surrogate             -- ^ Cs: Other, Surrogate
        | PrivateUse            -- ^ Co: Other, Private Use
        | NotAssigned           -- ^ Cn: Other, Not Assigned
        -- NOTE: 'generalCategory' decodes the C helper's category number
        -- with 'toEnum', so this constructor order must not change.
        deriving (Show, Eq, Ord, Enum, Bounded, Ix)
-- | The Unicode general category of the character. This relies on the
-- 'Enum' instance of 'GeneralCategory', which must remain in the
-- same order as the categories are presented in the Unicode
-- standard.
--
-- ==== __Examples__
--
-- Basic usage:
--
-- >>> generalCategory 'a'
-- LowercaseLetter
-- >>> generalCategory 'A'
-- UppercaseLetter
-- >>> generalCategory '0'
-- DecimalNumber
-- >>> generalCategory '%'
-- OtherPunctuation
-- >>> generalCategory '♥'
-- OtherSymbol
-- >>> generalCategory '\31'
-- Control
-- >>> generalCategory ' '
-- Space
--
generalCategory :: Char -> GeneralCategory
-- Pipeline: code point -> C category number -> constructor (see the
-- ordering note on 'GeneralCategory').
generalCategory = toEnum . fromIntegral . wgencat . fromIntegral . ord
-- | Selects the first 128 characters of the Unicode character set,
-- corresponding to the ASCII character set.
isAscii :: Char -> Bool
isAscii = (< '\x80')
-- | Selects the first 256 characters of the Unicode character set,
-- corresponding to the ISO 8859-1 (Latin-1) character set.
isLatin1 :: Char -> Bool
isLatin1 = (<= '\xff')
-- | Selects ASCII lower-case letters,
-- i.e. characters satisfying both 'isAscii' and 'isLower'.
isAsciiLower :: Char -> Bool
isAsciiLower c = 'a' <= c && c <= 'z'
-- | Selects ASCII upper-case letters,
-- i.e. characters satisfying both 'isAscii' and 'isUpper'.
isAsciiUpper :: Char -> Bool
isAsciiUpper c = 'A' <= c && c <= 'Z'
-- | Selects control characters, which are the non-printing characters of
-- the Latin-1 subset of Unicode.
-- (Implementation is below, delegating to the C helper @u_iswcntrl@.)
isControl :: Char -> Bool
-- | Selects printable Unicode characters
-- (letters, numbers, marks, punctuation, symbols and spaces).
-- (Implementation is below, delegating to the C helper @u_iswprint@.)
isPrint :: Char -> Bool
-- | Returns 'True' for any Unicode space character, and the control
-- characters @\\t@, @\\n@, @\\r@, @\\f@, @\\v@.
isSpace :: Char -> Bool
-- isSpace includes non-breaking space
-- The magic 0x377 isn't really that magical. As of 2014, all the codepoints
-- at or below 0x377 have been assigned, so we shouldn't have to worry about
-- any new spaces appearing below there. It would probably be best to
-- use branchless ||, but currently the eqLit transformation will undo that,
-- so we'll do it like this until there's a way around that.
isSpace c
  -- Fast path for low code points: 32 is ' ', the unsigned subtraction
  -- uc - 0x9 <= 4 covers '\t'..'\r' (wrapping below 0x9 to a huge Word),
  -- and 0xa0 is the non-breaking space.
  | uc <= 0x377 = uc == 32 || uc - 0x9 <= 4 || uc == 0xa0
  -- Everything else is decided by the C table lookup.
  | otherwise = iswspace (ord c) /= 0
  where
    uc = fromIntegral (ord c) :: Word
-- | Selects upper-case or title-case alphabetic Unicode characters (letters).
-- Title case is used by a small number of letter ligatures like the
-- single-character form of /Lj/.
isUpper :: Char -> Bool
-- | Selects lower-case alphabetic Unicode characters (letters).
isLower :: Char -> Bool
-- | Selects alphabetic Unicode characters (lower-case, upper-case and
-- title-case letters, plus letters of caseless scripts and modifiers letters).
-- This function is equivalent to 'Data.Char.isLetter'.
isAlpha :: Char -> Bool
-- | Selects alphabetic or numeric digit Unicode characters.
--
-- Note that numeric digits outside the ASCII range are selected by this
-- function but not by 'isDigit'. Such digits may be part of identifiers
-- but are not used by the printer and reader to represent numbers.
-- (All four predicates are implemented below via the C helpers.)
isAlphaNum :: Char -> Bool
-- | Selects ASCII digits, i.e. @\'0\'@..@\'9\'@.
isDigit :: Char -> Bool
isDigit c = (fromIntegral (ord c - ord '0') :: Word) <= 9
-- We use an addition and an unsigned comparison instead of two signed
-- comparisons because it's usually faster and puts less strain on branch
-- prediction. It likely also enables some CSE when combined with functions
-- that follow up with an actual conversion.
-- (Characters below '0' wrap around to a huge Word and so compare False.)
-- | Selects ASCII octal digits, i.e. @\'0\'@..@\'7\'@.
isOctDigit :: Char -> Bool
isOctDigit c = (fromIntegral (ord c - ord '0') :: Word) <= 7
-- | Selects ASCII hexadecimal digits,
-- i.e. @\'0\'@..@\'9\'@, @\'a\'@..@\'f\'@, @\'A\'@..@\'F\'@.
isHexDigit :: Char -> Bool
isHexDigit c = isDigit c ||
    (fromIntegral (ord c - ord 'A')::Word) <= 5 ||
    (fromIntegral (ord c - ord 'a')::Word) <= 5
-- | Selects Unicode punctuation characters, including various kinds
-- of connectors, brackets and quotes.
--
-- This function returns 'True' if its argument has one of the
-- following 'GeneralCategory's, or 'False' otherwise:
--
-- * 'ConnectorPunctuation'
-- * 'DashPunctuation'
-- * 'OpenPunctuation'
-- * 'ClosePunctuation'
-- * 'InitialQuote'
-- * 'FinalQuote'
-- * 'OtherPunctuation'
--
-- These classes are defined in the
-- <http://www.unicode.org/reports/tr44/tr44-14.html#GC_Values_Table Unicode Character Database>,
-- part of the Unicode standard. The same document defines what is
-- and is not a \"Punctuation\".
--
-- ==== __Examples__
--
-- Basic usage:
--
-- >>> isPunctuation 'a'
-- False
-- >>> isPunctuation '7'
-- False
-- >>> isPunctuation '♥'
-- False
-- >>> isPunctuation '"'
-- True
-- >>> isPunctuation '?'
-- True
-- >>> isPunctuation '—'
-- True
--
isPunctuation :: Char -> Bool
-- Membership test over the seven punctuation categories.
isPunctuation c =
       cat == ConnectorPunctuation
    || cat == DashPunctuation
    || cat == OpenPunctuation
    || cat == ClosePunctuation
    || cat == InitialQuote
    || cat == FinalQuote
    || cat == OtherPunctuation
  where
    cat = generalCategory c
-- | Selects Unicode symbol characters, including mathematical and
-- currency symbols.
--
-- This function returns 'True' if its argument has one of the
-- following 'GeneralCategory's, or 'False' otherwise:
--
-- * 'MathSymbol'
-- * 'CurrencySymbol'
-- * 'ModifierSymbol'
-- * 'OtherSymbol'
--
-- These classes are defined in the
-- <http://www.unicode.org/reports/tr44/tr44-14.html#GC_Values_Table Unicode Character Database>,
-- part of the Unicode standard. The same document defines what is
-- and is not a \"Symbol\".
--
-- ==== __Examples__
--
-- Basic usage:
--
-- >>> isSymbol 'a'
-- False
-- >>> isSymbol '6'
-- False
-- >>> isSymbol '='
-- True
--
-- The definition of \"math symbol\" may be a little
-- counter-intuitive depending on one's background:
--
-- >>> isSymbol '+'
-- True
-- >>> isSymbol '-'
-- False
--
isSymbol :: Char -> Bool
-- Membership test over the four symbol categories.
isSymbol c =
       cat == MathSymbol
    || cat == CurrencySymbol
    || cat == ModifierSymbol
    || cat == OtherSymbol
  where
    cat = generalCategory c
-- | Convert a letter to the corresponding upper-case letter, if any.
-- Any other character is returned unchanged.
toUpper :: Char -> Char
-- | Convert a letter to the corresponding lower-case letter, if any.
-- Any other character is returned unchanged.
toLower :: Char -> Char
-- | Convert a letter to the corresponding title-case or upper-case
-- letter, if any. (Title case differs from upper case only for a small
-- number of ligature letters.)
-- Any other character is returned unchanged.
-- (All three are implemented below via the C case-mapping helpers.)
toTitle :: Char -> Char
-- -----------------------------------------------------------------------------
-- Implementation with the supplied auto-generated Unicode character properties
-- table
-- Regardless of the O/S and Library, use the functions contained in WCsubst.c
-- Predicates: nonzero result from the C classifier means membership.
isAlpha    = (/= 0) . iswalpha . ord
isAlphaNum = (/= 0) . iswalnum . ord
isControl  = (/= 0) . iswcntrl . ord
isPrint    = (/= 0) . iswprint . ord
isUpper    = (/= 0) . iswupper . ord
isLower    = (/= 0) . iswlower . ord
-- Case mappings: convert via code points.
toLower    = chr . towlower . ord
toUpper    = chr . towupper . ord
toTitle    = chr . towtitle . ord
-- Raw bindings to the generated C helpers (cbits/WCsubst.c). The
-- classifiers return nonzero for membership; the case mappings operate
-- on code points ('Int'), not 'Char'.
foreign import ccall unsafe "u_iswalpha"
  iswalpha :: Int -> Int
foreign import ccall unsafe "u_iswalnum"
  iswalnum :: Int -> Int
foreign import ccall unsafe "u_iswcntrl"
  iswcntrl :: Int -> Int
foreign import ccall unsafe "u_iswspace"
  iswspace :: Int -> Int
foreign import ccall unsafe "u_iswprint"
  iswprint :: Int -> Int
foreign import ccall unsafe "u_iswlower"
  iswlower :: Int -> Int
foreign import ccall unsafe "u_iswupper"
  iswupper :: Int -> Int
foreign import ccall unsafe "u_towlower"
  towlower :: Int -> Int
foreign import ccall unsafe "u_towupper"
  towupper :: Int -> Int
foreign import ccall unsafe "u_towtitle"
  towtitle :: Int -> Int
-- | General-category number of a code point (consumed by 'generalCategory').
foreign import ccall unsafe "u_gencat"
  wgencat :: Int -> Int
| tolysz/prepare-ghcjs | spec-lts8/base/GHC/Unicode.hs | bsd-3-clause | 12,944 | 0 | 13 | 3,160 | 1,516 | 935 | 581 | 132 | 8 |
{-# LANGUAGE ScopedTypeVariables, PartialTypeSignatures #-}
module T10463 where
-- The pattern-signature wildcard '_' is inferred as String from the
-- use of (++ "") on the right-hand side.
f (x :: _) = x ++ ""
| ghc-android/ghc | testsuite/tests/partial-sigs/should_compile/T10463.hs | bsd-3-clause | 103 | 0 | 7 | 18 | 24 | 14 | 10 | 3 | 1 |
{-# LANGUAGE PolyKinds #-}
{-# LANGUAGE FlexibleInstances #-}
module PolyKinds13 where
-- | Poly-kinded proxy: carries a type of any kind, no runtime data.
data Proxy a = Proxy

instance Show (Proxy a) where
  show _ = "Proxy"

instance Functor Proxy where
  fmap _ Proxy = Proxy

-- | Dummy type representation (stand-in for Data.Typeable's TypeRep).
data TypeRep = TypeRep

-- | Poly-kinded Typeable analogue; the default method lets instances
-- be declared with no body.
class MyTypeable t where
  -- MyTypeable :: forall k. k -> Constraint
  myTypeOf :: Proxy t -> TypeRep
  myTypeOf _ = TypeRep
-- | Apply a type constructor to an argument.
data Apply f t = Apply (f t)
-- Apply :: forall k. (k -> *) -> k -> *

-- Instances at several different kinds exercise kind polymorphism.
instance MyTypeable Apply
-- df :: forall k. MyTypeable ((k -> *) -> k -> *) (Apply k)
instance MyTypeable Int
instance MyTypeable Maybe
| forked-upstream-packages-for-ghcjs/ghc | testsuite/tests/polykinds/PolyKinds13.hs | bsd-3-clause | 585 | 0 | 8 | 134 | 131 | 70 | 61 | 16 | 0 |
{-# LANGUAGE UndecidableInstances #-}
{-# LANGUAGE DeriveTraversable #-}
{-# LANGUAGE DeriveFoldable #-}
{-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE TypeFamilies #-}
-----------------------------------------------------------------------------
-- |
-- Description : Subset, dense, total map implemented as a vector.
-- License : MIT
-- Maintainer : Paweł Nowak <[email protected]>
-- Portability : GHC only
--
-- Subset, dense, total map implemented as a vector.
-----------------------------------------------------------------------------
module Data.Total.Array.Subset (
Subset,
TotalSubsetArray(..)
) where
import Data.Bytes.Serial
import Data.Distributive
import Data.Functor.Rep
import Data.Key
import Data.Proxy
import Data.Reflection
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Total.Subset
import Data.Vector (Vector)
import qualified Data.Vector as Vector
import Linear
import Prelude ()
import Prelude.Compat hiding (zip, zipWith)
-- '.:' (defined below) composes a one-argument function after a
-- two-argument one; its fixity matches function composition ('.').
infixr 9 .:
-- | A total map from a subset s of keys k to values a, e.g. a restriction
-- of a partial function @k -> a@ to a subset of its domain on which the
-- function is defined. Implemented as a vector.
--
-- n is equal to the number of keys.
newtype TotalSubsetArray s k a = TotalSubsetArray (Vector a)
    -- Values are stored in ascending key order (see 'keys''), which the
    -- index-based instances below rely on.
    deriving (Eq, Ord, Show, Read, Functor, Foldable, Traversable)
-- | Number of keys in the reflected subset.
keyCount :: Subset s k => Proxy s -> Int
keyCount = Set.size . reflect
-- | The keys of the subset, ascending, as a vector.
keys' :: Subset s k => Proxy s -> Vector k
keys' p = Vector.fromListN (Set.size set) (Set.toAscList set)
  where
    set = reflect p
-- | Rank of a key within the reflected subset (its vector slot).
toIndex :: (Ord k, Subset s k) => Proxy s -> k -> Int
toIndex p key = Set.findIndex key (reflect p)
-- | Maps each key to itself.
--
-- Complexity: O(n)
keys :: forall s k. Subset s k => TotalSubsetArray s k k
-- The (Proxy :: Proxy s) annotation pins the reflected subset to the
-- result type's s (relies on scoped type variables).
keys = TotalSubsetArray (keys' (Proxy :: Proxy s))
-- | Compose a unary function after a binary one:
-- @(f .: g) x y = f (g x y)@.
(.:) :: (c -> d) -> (a -> b -> c) -> a -> b -> d
(.:) f g = \x y -> f (g x y)
-- | Zippy applicative. Complexity: 'pure' O(n), '<*>' O(n).
instance Subset s k => Applicative (TotalSubsetArray s k) where
    -- 'pure' fills one slot per key; 'zap' applies slot-wise.
    pure = TotalSubsetArray . Vector.replicate (keyCount (Proxy :: Proxy s))
    (<*>) = zap
-- Keys instances.
type instance Key (TotalSubsetArray s k) = k
-- | Complexity: 'mapWithKey' O(n)
instance Subset s k => Keyed (TotalSubsetArray s k) where
    -- Pair each value with its key by zipping against the 'keys' array.
    mapWithKey f v = zipWith f keys v
-- | Complexity: all O(n)
-- Positional zip is key-correct because both arrays share key order.
instance Zip (TotalSubsetArray s k) where
    zipWith f (TotalSubsetArray a) (TotalSubsetArray b) =
        TotalSubsetArray $ Vector.zipWith f a b
-- | Complexity: all O(n)
instance Subset s k => ZipWithKey (TotalSubsetArray s k) where
    zipWithKey f a b = zipWith (uncurry . f) keys (zip a b)
-- | Complexity: 'lookup' O(log n)
instance (Ord k, Subset s k) => Lookup (TotalSubsetArray s k) where
    -- The map is total over the subset, so lookup always succeeds.
    lookup k (TotalSubsetArray v) =
        Just $ Vector.unsafeIndex v (toIndex (Proxy :: Proxy s) k)
-- | Complexity: 'index' O(log n)
instance (Ord k, Subset s k) => Indexable (TotalSubsetArray s k) where
    index (TotalSubsetArray v) k =
        Vector.unsafeIndex v (toIndex (Proxy :: Proxy s) k)
-- | Complexity: 'adjust' O(n)
instance (Ord k, Subset s k) => Adjustable (TotalSubsetArray s k) where
    -- Copies the whole vector with the one slot replaced.
    adjust f k (TotalSubsetArray v) = TotalSubsetArray $ Vector.unsafeUpd v [(i, x)]
      where
        i = toIndex (Proxy :: Proxy s) k
        x = f $ Vector.unsafeIndex v i
-- | Complexity: 'foldMapWithKey' O(n)
instance Subset s k => FoldableWithKey (TotalSubsetArray s k) where
    foldMapWithKey f v = foldMap (uncurry f) (zip keys v)
-- | Complexity: 'traverseWithKey' O(n)
instance Subset s k => TraversableWithKey (TotalSubsetArray s k) where
    traverseWithKey f v = traverse (uncurry f) (zip keys v)
-- Linear instances.
-- | Complexity: all O(n)
instance Subset s k => Additive (TotalSubsetArray s k) where
    zero = pure 0
-- | Complexity: all O(n)
-- Default method implementations suffice for this zippy functor.
instance Subset s k => Metric (TotalSubsetArray s k)
-- Serial instances.
-- | Complexity: 'serializeWith' O(n), 'deserializeWith' O(n)
instance Subset s k => Serial1 (TotalSubsetArray s k) where
    serializeWith f (TotalSubsetArray v) = Vector.mapM_ f v
    -- Keys are implicit (fixed by the subset s), so only the n values
    -- are written and read back.
    deserializeWith f = TotalSubsetArray
        <$> Vector.replicateM (keyCount (Proxy :: Proxy s)) f
-- | Complexity: 'serialize' O(n), 'deserialize' O(n)
instance (Subset s k, Serial a) => Serial (TotalSubsetArray s k a) where
    serialize m = serializeWith serialize m
    deserialize = deserializeWith deserialize
-- | Complexity: 'distribute' O(n * fmap)
instance Subset s k => Distributive (TotalSubsetArray s k) where
    distribute x = TotalSubsetArray $ Vector.generate
        (keyCount (Proxy :: Proxy s)) (\i -> fmap (index' i) x)
      where
        -- Project the i-th slot out of each inner array.
        index' i (TotalSubsetArray v) = Vector.unsafeIndex v i
-- | Convert from and to a partial function that would be total if
-- restricted to s.
--
-- Complexity: tabulate O(n), index O(log n)
instance (Ord k, Subset s k) => Representable (TotalSubsetArray s k) where
    type Rep (TotalSubsetArray s k) = k
    tabulate f = fmap f keys
    -- Reuse the 'Indexable' instance (qualified to disambiguate from
    -- 'Data.Functor.Rep.index', the method being defined).
    index = Data.Key.index
| pawel-n/total-maps | src/Data/Total/Array/Subset.hs | mit | 5,121 | 0 | 12 | 1,102 | 1,456 | 771 | 685 | -1 | -1 |
{-# LANGUAGE ExistentialQuantification, NamedFieldPuns #-}
module Main where
import Text.ParserCombinators.Parsec hiding (spaces)
import System.Environment
import Control.Applicative ((<$>))
import Control.Monad.Error
import Data.Maybe
import System.IO
import Data.IORef
-- | Mutable environment: an association list from names to mutable
-- value cells, so closures can share and update bindings.
type Env = IORef [(String, IORef LispVal)]
-- | An empty top-level environment.
nullEnv :: IO Env
nullEnv = newIORef []
-- | IO computations that may fail with a 'LispError'.
type IOThrowsError = ErrorT LispError IO
-- | Lift a pure error-or-value result into the combined IO error monad.
liftThrows :: ThrowsError a -> IOThrowsError a
liftThrows = either throwError return
-- | Run an action, rendering any error as a string; the trapped result
-- is always a Right, so 'extractValue' is safe here.
runIOThrows :: IOThrowsError String -> IO String
runIOThrows action = extractValue <$> runErrorT (trapError action)
-- | Is the variable currently bound in this environment?
isBound :: Env -> String -> IO Bool
isBound envRef var = fmap (isJust . lookup var) (readIORef envRef)
-- | Read a variable's value, failing with 'UnboundVar' if unbound.
getVar :: Env -> String -> IOThrowsError LispVal
getVar envRef var = do
    env <- liftIO $ readIORef envRef
    maybe (throwError $ UnboundVar "Unbound variable" var)
          (liftIO . readIORef)
          (lookup var env)
-- | Overwrite an existing binding (it is an error to set an unbound
-- variable). Returns the assigned value.
setVar :: Env -> String -> LispVal -> IOThrowsError LispVal
setVar envRef var value = do
    env <- liftIO $ readIORef envRef
    maybe (throwError $ UnboundVar "Unbound variable" var)
          (liftIO . (`writeIORef` value))
          (lookup var env)
    return value
-- | Bind a variable: overwrite the existing cell if bound, otherwise
-- prepend a fresh cell. Returns the bound value.
defineVar :: Env -> String -> LispVal -> IOThrowsError LispVal
defineVar envRef var value = do
    alreadyDefined <- liftIO $ isBound envRef var
    if alreadyDefined
       then setVar envRef var value >> return value
       else liftIO $ do
           valueRef <- newIORef value
           env <- readIORef envRef
           writeIORef envRef ((var, valueRef) : env)
           return value
-- | Create a new environment extending the given one with the supplied
-- bindings (used for function application and interpreter setup).
bindVars :: Env -> [(String, LispVal)] -> IO Env
bindVars envRef bindings = readIORef envRef >>= extendEnv bindings >>= newIORef
    where extendEnv bindings env = liftM (++ env) $ mapM addBinding bindings
          addBinding (var, value) = do ref <- newIORef value
                                       return (var, ref)
-- | A Scheme value. 'PrimitiveFunc' and 'IOFunc' wrap built-in Haskell
-- implementations; 'Func' is a user-defined closure over 'env'.
data LispVal = Atom String
             | List [LispVal]
             | DottedList [LispVal] LispVal
             | Number Integer
             | String String
             | Bool Bool
             | PrimitiveFunc ([LispVal] -> ThrowsError LispVal)
             | Func { params :: [String]       -- fixed parameter names
                    , varargs :: Maybe String  -- name for the rest-args list
                    , body :: [LispVal]        -- body expressions
                    , env :: Env               -- captured definition environment
                    }
             | IOFunc ([LispVal] -> IOThrowsError LispVal)
             | Port Handle

instance Show LispVal where show = showVal
-- | Everything that can go wrong while parsing or evaluating.
data LispError = NumArgs Integer [LispVal]
               | TypeMismatch String LispVal
               | Parser ParseError
               | BadSpecialForm String LispVal
               | NotFunction String String
               | UnboundVar String String
               | Default String
-- | Render a 'LispError' as a human-readable message.
showError :: LispError -> String
showError (UnboundVar message varName) = message ++ ": " ++ varName
showError (BadSpecialForm message form) = message ++ ": " ++ show form
showError (NotFunction message f) = message ++ ": " ++ show f
showError (NumArgs expected found) = "Expected " ++ show expected ++ " args; found values " ++ unwordsList found
showError (TypeMismatch expected found) = "Invalid type: expected " ++ expected ++ ", found " ++ show found
showError (Parser parseError) = "Parse error at " ++ show parseError
-- Previously missing: showing a 'Default' error (built by 'strMsg' in
-- the Error instance) crashed with a pattern-match failure.
showError (Default message) = message
instance Show LispError where show = showError

-- 'Error' is required by ErrorT (Control.Monad.Error); 'strMsg' wraps
-- arbitrary message strings in the 'Default' constructor.
instance Error LispError where
    noMsg = Default "An error has occurred"
    strMsg = Default

-- | Pure computations that may fail with a 'LispError'.
type ThrowsError = Either LispError
-- | Convert a thrown error into its rendered string, so the computation
-- always succeeds.
trapError :: (MonadError e m, Show e) => m String -> m String
trapError action = catchError action (return . show)

-- | Extract the success value. Callers run 'trapError' first, so the
-- Left case is unreachable in practice; it is now an explicit 'error'
-- instead of a bare pattern-match failure.
extractValue :: ThrowsError a -> a
extractValue (Right val) = val
extractValue (Left err) = error $ "extractValue: unexpected Left: " ++ show err
-- | One or more whitespace characters (shadows Parsec's 'spaces').
spaces :: Parser ()
spaces = skipMany1 space

-- | Characters allowed in Scheme symbols besides letters and digits.
symbol :: Parser Char
symbol = oneOf "!#$%&|*+-/:<=>?@^_~"
-- | One logical character inside a string literal: either an escaped
-- quote (returned as the two characters @\\\"@) or any single
-- non-quote character.
stringChar :: Parser String
stringChar = try escapedQuote <|> notQuote
  where
    -- The escape must be tried first: 'notQuote' accepts a lone
    -- backslash, so with the previous order (notQuote first) the
    -- escapedQuote branch could never see a \" sequence. 'try' makes
    -- sure a backslash not followed by '"' still falls back to
    -- notQuote without consuming input.
    escapedQuote = do
        q <- char '\\'
        c <- char '"'
        return $ q:[c]
    notQuote = do
        c <- noneOf "\""
        return [c]
-- | Parse a double-quoted string literal.
-- NOTE(review): escape sequences are not handled — a backslash is taken
-- literally and the first @\"@ always terminates the string. The
-- 'stringChar' helper above looks intended for this but is unused.
parseString :: Parser LispVal
parseString = do
    char '"'
    x <- many (noneOf "\"")
    char '"'
    return $ String x
-- | Parse a symbol; the special atoms @#t@ and @#f@ become booleans.
parseAtom :: Parser LispVal
parseAtom = do
    first <- letter <|> symbol
    rest <- many (letter <|> digit <|> symbol)
    let atom = first : rest
    return $ case atom of
               "#t" -> Bool True
               "#f" -> Bool False
               _ -> Atom atom
-- | Parse an unsigned decimal integer.
parseNumber :: Parser LispVal
parseNumber = Number . read <$> many1 digit

-- | Parse a proper list: expressions separated by whitespace.
parseList :: Parser LispVal
parseList = List <$> parseExpr `sepBy` spaces

-- | Parse a dotted (improper) list, e.g. @(a b . c)@.
parseDottedList :: Parser LispVal
parseDottedList = do
    h <- parseExpr `endBy` spaces
    t <- char '.' >> spaces >> parseExpr
    return $ DottedList h t

-- | Desugar @'expr@ into @(quote expr)@.
parseQuoted :: Parser LispVal
parseQuoted = do
    char '\''
    x <- parseExpr
    return $ List [Atom "quote", x]
-- | Any Scheme expression. Parenthesised input tries a proper list
-- first and backtracks ('try') to a dotted list.
parseExpr :: Parser LispVal
parseExpr = parseAtom
        <|> parseString
        <|> parseNumber
        <|> parseQuoted
        <|> between (char '(') (char ')') (try parseList <|> parseDottedList)
-- | Run a parser over an input string, mapping failure to 'Parser'.
readOrThrow :: Parser a -> String -> ThrowsError a
readOrThrow parser input =
    case parse parser "lisp" input of
      Left err -> throwError $ Parser err
      Right val -> return val

-- | Parse a single expression.
readExpr :: String -> ThrowsError LispVal
readExpr = readOrThrow parseExpr

-- | Parse a whole program: many whitespace-separated expressions.
readExprList :: String -> ThrowsError [LispVal]
readExprList = readOrThrow (parseExpr `endBy` spaces)
-- | Render a value in Scheme syntax (backs the 'Show' instance).
showVal :: LispVal -> String
showVal (String contents) = "\"" ++ contents ++ "\""
showVal (Atom name) = name
showVal (Number contents) = show contents
showVal (Bool True) = "#t"
showVal (Bool False) = "#f"
showVal (List contents) = "(" ++ unwordsList contents ++ ")"
showVal (DottedList h t) = "(" ++ unwordsList h ++ " . " ++ showVal t ++ ")"
showVal (PrimitiveFunc _) = "<primitive>"
showVal (Func {params, varargs, body, env}) =
    "(lambda (" ++ unwords (map show params) ++
        (case varargs of
           Nothing -> ""
           Just arg -> " . " ++ arg) ++ ") ...)"
-- Previously missing: printing an 'IOFunc' or a 'Port' value crashed
-- with a pattern-match failure (e.g. when the REPL echoed one).
showVal (IOFunc _) = "<IO primitive>"
showVal (Port _) = "<IO port>"
-- | Evaluate a Scheme expression in an environment: self-evaluating
-- literals first, then each special form, then function application.
eval :: Env -> LispVal -> IOThrowsError LispVal
eval env val@(String _) = return val
eval env val@(Number _) = return val
eval env val@(Bool _) = return val
eval env (Atom id) = getVar env id
eval env (List [Atom "quote", val]) = return val
eval env (List [Atom "if", pred, conseq, alt]) =
    do result <- eval env pred
       case result of
         Bool True -> eval env conseq
         Bool False -> eval env alt
         -- Unlike standard Scheme (any non-#f value is truthy), a
         -- non-boolean condition is rejected here.
         notBool -> throwError $ TypeMismatch "bool" notBool
eval env (List [Atom "set!", Atom var, form]) = eval env form >>= setVar env var
eval env (List [Atom "define", Atom var, form]) = eval env form >>= defineVar env var
eval env (List [Atom "load", String fileName]) = do
    program <- load fileName
    -- NOTE(review): 'last' fails on an empty program file — TODO confirm
    -- whether loading an empty file should be an error.
    last <$> mapM (eval env) program
-- (define (f params...) body...) — function definition sugar.
eval env (List (Atom "define" : List (Atom var : params) : body)) =
    makeNormalFunc env params body >>= defineVar env var
eval env (List (Atom "define" : DottedList (Atom var : params) varargs : body)) =
    makeVarArgs varargs env params body >>= defineVar env var
-- The three lambda shapes: fixed params, params + rest, rest only.
eval env (List (Atom "lambda" : List params : body)) =
    makeNormalFunc env params body
eval env (List (Atom "lambda" : DottedList params varargs : body)) =
    makeVarArgs varargs env params body
eval env (List (Atom "lambda" : varargs@(Atom _) : body)) =
    makeVarArgs varargs env [] body
-- Plain application: evaluate operator and operands, then apply.
eval env (List (func : args)) = do
    f <- eval env func
    argVals <- mapM (eval env) args
    apply f argVals
eval env badForm = throwError $ BadSpecialForm "Unrecognized special form" badForm
-- | Build a 'Func' with no varargs parameter.
makeNormalFunc :: Env -> [LispVal] -> [LispVal] -> IOThrowsError LispVal
makeNormalFunc = makeFunc Nothing

-- | Build a 'Func' whose trailing rest-parameter is named by the
-- rendering of the given value (an 'Atom' in practice).
makeVarArgs :: LispVal -> Env -> [LispVal] -> [LispVal] -> IOThrowsError LispVal
makeVarArgs = makeFunc . Just . showVal

-- | Shared constructor: parameter names are the rendered forms.
-- (Type signatures were previously missing on all three.)
makeFunc :: Maybe String -> Env -> [LispVal] -> [LispVal] -> IOThrowsError LispVal
makeFunc varargs env params body = return $ Func (map showVal params) varargs body env
-- | Apply a function value to already-evaluated arguments.
apply :: LispVal -> [LispVal] -> IOThrowsError LispVal
apply (PrimitiveFunc f) args = liftThrows $ f args
apply (Func {params, varargs, body, env}) args =
    if num params /= num args && isNothing varargs
       then throwError $ NumArgs (num params) args
       else (liftIO . bindVars env $ zip params args)
            >>= bindVarArgs varargs
            >>= evalBody
    where remainingArgs = drop (length params) args
          num = toInteger . length
          -- The function's value is the value of its last body form.
          evalBody env = last <$> mapM (eval env) body
          -- Arguments beyond the fixed parameters are bound to the
          -- varargs name as a list (when there is one).
          bindVarArgs arg env =
              case arg of
                Just argName -> liftIO $ bindVars env [(argName, List remainingArgs)]
                Nothing -> return env
apply (IOFunc func) args = func args
apply notAFunc _ = throwError $ TypeMismatch "func" notAFunc
-- | The initial top-level environment, containing every built-in.
primitiveBindings :: IO Env
primitiveBindings = do
    n <- nullEnv
    bindVars n $ map (makeFunc IOFunc) ioPrimitives
              ++ map (makeFunc PrimitiveFunc) primitives
    -- This local 'makeFunc' shadows the top-level one: it wraps each
    -- Haskell implementation in the matching LispVal constructor.
    where makeFunc constructor (var, f) = (var, constructor f)
-- | Pure built-ins, keyed by their Scheme names. Note that @eq?@ and
-- @eqv?@ share one implementation.
primitives :: [(String, [LispVal] -> ThrowsError LispVal)]
primitives = [ ("+", numericBinOp (+))
             , ("-", numericBinOp (-))
             , ("*", numericBinOp (*))
             , ("/", numericBinOp div)
             , ("mod", numericBinOp mod)
             , ("quotient", numericBinOp quot)
             , ("remainder", numericBinOp rem)
             , ("=", numBoolBinOp (==))
             , ("<", numBoolBinOp (<))
             , (">", numBoolBinOp (>))
             , ("/=", numBoolBinOp (/=))
             , (">=", numBoolBinOp (>=))
             , ("<=", numBoolBinOp (<=))
             , ("&&", boolBoolBinOp (&&))
             , ("||", boolBoolBinOp (||))
             , ("string=?", strBoolBinOp (==))
             , ("string<?", strBoolBinOp (<))
             , ("string>?", strBoolBinOp (>))
             , ("string<=?", strBoolBinOp (<=))
             , ("string>=?", strBoolBinOp (>=))
             , ("car", car)
             , ("cdr", cdr)
             , ("cons", cons)
             , ("eq?", eqv)
             , ("eqv?", eqv)
             , ("equal?", equal)
             ]
-- | Built-ins that perform IO, keyed by their Scheme names.
ioPrimitives :: [(String, [LispVal] -> IOThrowsError LispVal)]
ioPrimitives = [ ("apply", applyProc)
               , ("open-input-file", makePort ReadMode)
               , ("open-output-file", makePort WriteMode)
               , ("close-input-port", closePort)
               , ("close-output-port", closePort)
               , ("read", readProc)
               , ("write", writeProc)
               , ("read-contents", readContents)
               , ("read-all", readAll)
               ]
-- | Build a two-argument predicate primitive: unpack both arguments
-- with the given unpacker, compare, and wrap the result in 'Bool'.
-- Any other argument count is a 'NumArgs' error.
boolBinOp :: (LispVal -> ThrowsError a) -> (a -> a -> Bool) -> [LispVal] -> ThrowsError LispVal
-- Pattern matching replaces the previous length-check plus partial
-- 'head'/'(!! 1)' accesses.
boolBinOp unpacker op [l, r] = do
    left  <- unpacker l
    right <- unpacker r
    return . Bool $ left `op` right
boolBinOp _ _ args = throwError $ NumArgs 2 args

-- Specialisations for each unpacker (signatures previously missing).
numBoolBinOp :: (Integer -> Integer -> Bool) -> [LispVal] -> ThrowsError LispVal
numBoolBinOp = boolBinOp unpackNum

strBoolBinOp :: (String -> String -> Bool) -> [LispVal] -> ThrowsError LispVal
strBoolBinOp = boolBinOp unpackStr

boolBoolBinOp :: (Bool -> Bool -> Bool) -> [LispVal] -> ThrowsError LispVal
boolBoolBinOp = boolBinOp unpackBool
-- | Fold a binary integer operator over two or more arguments;
-- fewer than two arguments is a 'NumArgs' error.
numericBinOp :: (Integer -> Integer -> Integer) -> [LispVal] -> ThrowsError LispVal
numericBinOp _ [] = throwError $ NumArgs 2 []
numericBinOp _ singleVal@[_] = throwError $ NumArgs 2 singleVal
numericBinOp op params = liftM (Number . foldl1 op) $ mapM unpackNum params
-- | Coerce to an Integer, or fail with 'TypeMismatch'.
unpackNum :: LispVal -> ThrowsError Integer
unpackNum (Number n) = return n
unpackNum notNum = throwError $ TypeMismatch "number" notNum

-- | Coerce to a String; numbers and booleans are rendered via 'show'.
unpackStr :: LispVal -> ThrowsError String
unpackStr (String s) = return s
unpackStr (Number s) = return $ show s
unpackStr (Bool s) = return $ show s
unpackStr notString = throwError $ TypeMismatch "string" notString

-- | Coerce to a Bool, or fail with 'TypeMismatch'.
unpackBool :: LispVal -> ThrowsError Bool
unpackBool (Bool b) = return b
unpackBool notBool = throwError $ TypeMismatch "boolean" notBool
-- | Render a list of values as a space-separated string.
unwordsList :: [LispVal] -> String
unwordsList vals = unwords [showVal v | v <- vals]
-- | Scheme car: first element of a (possibly dotted) pair.
car :: [LispVal] -> ThrowsError LispVal
car [List (x : _)] = return x
car [DottedList (x : _) _] = return x
car [badArg] = throwError $ TypeMismatch "pair" badArg
car badArgList = throwError $ NumArgs 1 badArgList

-- | Scheme cdr: everything but the first element.
cdr :: [LispVal] -> ThrowsError LispVal
cdr [List (_ : xs)] = return $ List xs
-- A one-element dotted list's cdr is the trailing value itself.
cdr [DottedList [_] x] = return x
cdr [DottedList (_ : xs) x] = return $ DottedList xs x
cdr [badArg] = throwError $ TypeMismatch "pair" badArg
cdr badArgList = throwError $ NumArgs 1 badArgList

-- | Scheme cons; consing onto a non-list yields a dotted pair.
cons :: [LispVal] -> ThrowsError LispVal
cons [x, List []] = return $ List [x]
cons [x, List xs] = return $ List (x : xs)
cons [x, DottedList xs xlast] = return $ DottedList (x : xs) xlast
cons [x, y] = return $ DottedList [x] y
cons badArgList = throwError $ NumArgs 2 badArgList
-- | Scheme eqv?/eq?: structural equality on primitives and lists;
-- values of differing constructors are unequal.
eqv :: [LispVal] -> ThrowsError LispVal
eqv [Bool a, Bool b] = return . Bool $ a == b
eqv [Number a, Number b] = return . Bool $ a == b
eqv [String a, String b] = return . Bool $ a == b
eqv [Atom a, Atom b] = return . Bool $ a == b
-- Dotted lists compare as proper lists with the tail appended.
eqv [DottedList xs x, DottedList ys y] = eqv [List (xs ++ [x]), List (ys ++ [y])]
eqv [List xs, List ys] = return . Bool $ length xs == length ys &&
                                         all eqvPair (xs `zip` ys)
     where eqvPair (x, y) = case eqv [x, y] of
                              Left _ -> False
                              -- On success eqv only ever returns Bool,
                              -- so this match is exhaustive in practice.
                              Right (Bool val) -> val
eqv [_, _] = return $ Bool False
eqv badArgList = throwError $ NumArgs 2 badArgList
-- | Existential wrapper so unpackers of different result types can
-- live in one list (see 'equal').
data Unpacker = forall a. Eq a => AnyUnpacker (LispVal -> ThrowsError a)

-- | True when both values unpack (via the same unpacker) to equal
-- results; an unpack failure counts as not-equal rather than erroring.
unpackEquals :: LispVal -> LispVal -> Unpacker -> ThrowsError Bool
unpackEquals a b (AnyUnpacker unpacker) =
    do ua <- unpacker a
       ub <- unpacker b
       return $ ua == ub
    `catchError` const (return False)
-- | Scheme equal?: like 'eqv', but also equates values that coincide
-- after coercion (e.g. a number and its string rendering).
equal :: [LispVal] -> ThrowsError LispVal
equal [a, b] = do
    primitiveEquals <- liftM or $ mapM (unpackEquals a b)
        [ AnyUnpacker unpackNum
        , AnyUnpacker unpackStr
        , AnyUnpacker unpackBool
        ]
    Bool eqvEquals <- eqv [a, b]
    return . Bool $ (primitiveEquals || eqvEquals)
equal badArgList = throwError $ NumArgs 2 badArgList
-- | (apply f args) / (apply f a b ...): apply a function value.
applyProc :: [LispVal] -> IOThrowsError LispVal
applyProc [f, List args] = apply f args
applyProc (f : args) = apply f args

-- | Open a file as a port; the IOMode decides read vs write.
makePort :: IOMode -> [LispVal] -> IOThrowsError LispVal
makePort mode [String fileName] = liftM Port . liftIO $ openFile fileName mode

-- | Close a port; #t on success, #f for non-port arguments.
closePort :: [LispVal] -> IOThrowsError LispVal
closePort [Port port] = do
    liftIO $ hClose port
    return $ Bool True
closePort _ = return $ Bool False

-- | Read and parse one expression from a port (default: stdin).
readProc :: [LispVal] -> IOThrowsError LispVal
readProc [] = readProc [Port stdin]
readProc [Port port] = do
    line <- liftIO $ hGetLine port
    liftThrows $ readExpr line

-- | Write a value to a port (default: stdout); returns #t.
writeProc :: [LispVal] -> IOThrowsError LispVal
writeProc [obj] = writeProc [obj, Port stdout]
writeProc [obj, Port port] = do
    liftIO $ hPrint port obj
    return $ Bool True
-- | (read-contents "file"): the whole file as one Scheme string.
readContents :: [LispVal] -> IOThrowsError LispVal
readContents [String fileName] = liftM String $ liftIO $ readFile fileName

-- | Read and parse a program file into its list of expressions.
load :: String -> IOThrowsError [LispVal]
load fileName = do
    contents <- liftIO $ readFile fileName
    liftThrows $ readExprList contents

-- | (read-all "file"): the parsed file as a Scheme list.
readAll :: [LispVal] -> IOThrowsError LispVal
readAll [String fileName] = List <$> load fileName
-- | Print a string and flush immediately (so prompts appear before
-- the user types).
flushStr :: String -> IO ()
flushStr str = do
    putStr str
    hFlush stdout

-- | Show a prompt and read one line of input.
readPrompt :: String -> IO String
readPrompt prompt = do
    flushStr prompt
    getLine
-- | Parse and evaluate one expression, rendering the result or error.
evalString :: Env -> String -> IO String
evalString env expr =
    runIOThrows . liftM show $ liftThrows (readExpr expr) >>= eval env

-- | Evaluate an expression and print the outcome to stdout.
evalAndPrint :: Env -> String -> IO ()
evalAndPrint env expr = evalString env expr >>= putStrLn
-- | Repeatedly run @prompt@ and feed its result to @action@, stopping
-- (without running the action) once the predicate holds.
until_ :: Monad m => (a -> Bool) -> m a -> (a -> m ()) -> m ()
until_ stop prompt action = loop
  where
    loop = do
        result <- prompt
        unless (stop result) $ action result >> loop
-- | Run a single program file: the tail of argv is exposed to the
-- program as the variable "args"; the result (or error) goes to stderr.
runOne :: [String] -> IO ()
runOne args = do
    env <- do
        prims <- primitiveBindings
        bindVars prims [("args", List . map String $ tail args)]
    result <- runIOThrows $ liftM show $ eval env (List [Atom "load", String (head args)])
    hPutStrLn stderr result

-- | Read-eval-print loop; terminates on the input "quit".
runRepl :: IO ()
runRepl = primitiveBindings >>= until_ (== "quit") (readPrompt "Lisp» ") . evalAndPrint
-- | Entry point: with no arguments start the REPL; otherwise run the
-- file named by the first argument.
main :: IO ()
main = do
    args <- getArgs
    case args of
      [] -> runRepl
      _  -> runOne args
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Data.Text (Text)
import qualified Data.Text as T
import qualified Data.Text.IO as T
import System.Environment (getArgs)
import Data.List (transpose)
import Options.Applicative
-- IN PROGRESS: TODO implement startAtColumn
-- | Runtime configuration decoded from the command line.
data Options = Options {
    matchMode :: MatchMode      -- ^ how match strings are interpreted
  , matchStrings :: [Text]      -- ^ the strings to align on
  } deriving (Show)

-- | 'Series' aligns on each string in turn; 'Alternatives' treats them
-- like a regex alternation.
data MatchMode = Series | Alternatives deriving Show
-- | Command-line parser: @-a@ switches to 'Alternatives' mode; the
-- positional arguments become the match strings (packed to 'Text').
parseOpts :: Parser Options
parseOpts = Options
    <$> flag Series Alternatives
        (short 'a' <> long "alternatives"
        <> help "Treat match strings as alternatives for alignment, like regex /(a|b|c)/.")
    <*> ((map T.pack) <$>
        many (argument str
            (metavar "MATCH STRING"
            <> help "The string to align on, between a pair of single quotes. Can be part of a series of match strings.")))
-- Full program description (help text, header, footer).
-- NOTE(review): no type signature; the inferred type is
-- @ParserInfo Options@.
opts = info (helper <*> parseOpts)
    (fullDesc <> progDesc "Align code text from STDIN on operators."
    <> header "align"
    <> footer "See https://github.com/danchoi/align for more info.")
-- Read all lines from STDIN, align them per the selected mode, print.
main = do
    Options mode alignStrings <- execParser opts
    input <- (T.lines . T.pack) <$> getContents
    let result :: [Text]
        result =
          case mode of
            -- Series: apply 'align' once per match string, left to right.
            Series -> foldl (\lines sep -> align lines sep) input alignStrings
            Alternatives -> alignOnAlteratives alignStrings input
    T.putStr . T.unlines $ result
-- Aligning in standard Series mode: split every line at the first
-- occurrence of the align string, pad the first column to a uniform
-- width, and glue the pieces back together.
align :: [Text] -> Text -> [Text]
align lines alignString =
    let lines' :: [[Text]]
        lines' = map (splitOn alignString) lines
        -- Irrefutable binding: only forced when there is at least one
        -- non-empty row after transposition.
        firstCol:rest = transpose lines'
        firstCol' = adjustWidth alignString firstCol
        lines'' = transpose (firstCol':rest)
    in map T.concat lines''
-- split a line into two segments if it contains the alignstring,
-- and one if it doesn't
-- The first segment is right-trimmed and empty segments are dropped, so a
-- non-matching line yields [line] and a matching one [prefix, match<>rest].
splitOn :: Text -> Text -> [Text]
splitOn alignString input =
  -- breakOn "::" "a::b::c" ==> ("a", "::b::c")
  -- breakOn "/" "foobar" ==> ("foobar", "")
  let (x,y) = T.breakOn alignString input
  in [z | z <- [T.stripEnd x, y], z /= mempty]
-- | Makes column cells in a column the same width
-- Used for the standard Series mode.
-- Each cell is right-trimmed, left-justified to the widest cell, and given
-- one trailing space so the next column never touches it.
-- NOTE(review): the alignStr parameter is unused here.
-- NOTE(review): 'maximum' is partial on an empty column — presumably the
-- callers never pass one; confirm for empty STDIN.
adjustWidth :: Text -> [Text] -> [Text]
adjustWidth alignStr xs =
    map maybeAdjust xs
  where maxWidth = maximum $ map (T.length . T.stripEnd) $ xs
        maybeAdjust :: Text -> Text
        maybeAdjust cell = (T.justifyLeft maxWidth ' ' . T.stripEnd $ cell) <> " "
-- | Alternatives mode: split each line on whichever alternative it
-- contains (first listed wins, see 'splitOnAny'), then pad the first column.
-- NOTE(review): the name has a typo ("Alteratives"); kept as-is because
-- callers use it.
alignOnAlteratives :: [Text] -> [Text] -> [Text]
alignOnAlteratives alts lines =
  -- each row cell contains (Maybe alternative) that was split on
  let lines' :: [[Text]]
      lines' = map (splitOnAny alts) lines
      firstCol:rest = transpose lines'
      firstCol' = adjustWidth' firstCol
      lines'' = transpose (firstCol':rest)
  in map T.concat lines''
-- | Pad every cell of a column to the width of its widest (right-trimmed)
-- member. Unlike 'adjustWidth', no trailing space is appended.
adjustWidth' :: [Text] -> [Text]
adjustWidth' cells = map pad cells
  where
    widest = maximum (map (T.length . T.stripEnd) cells)
    pad c  = T.justifyLeft widest ' ' (T.stripEnd c)
-- | Split a line on the first listed alternative that occurs in it;
-- a line containing none of the alternatives is returned whole.
splitOnAny :: [Text] -> Text -> [Text]
splitOnAny alts line =
  case filter (`T.isInfixOf` line) alts of
    []        -> [line]
    (alt : _) -> splitOn alt line
-- | strips whitespace around text but makes sure that it's left-padded with one space
-- NOTE(review): not referenced by anything else in this module — possibly
-- dead code kept around for the startAtColumn TODO.
trim :: Text -> Text -> Text
trim alignString s =
  -- alignString will be on 1st elem of tuple:
  let (x,y) = T.breakOnEnd alignString s
  in T.cons ' '
     $ mconcat [T.strip x, " ", T.strip y]
| danchoi/align-text | Main.hs | mit | 3,748 | 0 | 16 | 971 | 1,005 | 530 | 475 | 78 | 2 |
-- | A hand-rolled cons list, used to contrast the two lawful Applicative
-- instances for lists.
data List a = Nil | Cons a (List a) deriving (Eq, Show)

instance Functor List where
  fmap _ Nil = Nil
  fmap f (Cons x xs) = Cons (f x) (fmap f xs)

-- | List concatenation, analogous to (++).
append Nil ys = ys
append (Cons x xs) ys = Cons x (append xs ys)

-- | Cartesian-product Applicative, matching the builtin [] instance:
-- every function is applied to every argument, in order.
instance Applicative List where
  pure x = Cons x Nil
  (<*>) _ Nil = Nil
  (<*>) Nil _ = Nil
  (<*>) (Cons f fs) xs = fmap f xs `append` (fs <*> xs)

-- | Newtype selecting the zippy (pointwise) Applicative for 'List'.
newtype ZipList' a = ZipList' (List a) deriving (Eq, Show)

instance Functor ZipList' where
  fmap f (ZipList' xs) = ZipList' $ fmap f xs
-- | Pointwise Applicative. 'pure' must produce an infinite repetition of
-- the value: with the previous one-element 'pure', @pure id <*> v@
-- truncated @v@ to a single element, violating the Applicative identity
-- law. 'repeat'' (defined below) supplies the required infinite list.
instance Applicative ZipList' where
  pure = ZipList' . repeat'
  (<*>) (ZipList' xs) (ZipList' ys) = ZipList' $ zipWith' ($) xs ys
-- | Pointwise zip; the result is as long as the shorter input.
zipWith' :: (a -> b -> c) -> List a -> List b -> List c
zipWith' _ _ Nil = Nil
zipWith' _ Nil _ = Nil
zipWith' f (Cons x xs) (Cons y ys) = Cons (f x y) (zipWith' f xs ys)

-- | Infinite repetition of a value (cf. Prelude 'repeat').
repeat' x = Cons x (repeat' x)
| JustinUnger/haskell-book | ch17/ziplist-app.hs | mit | 862 | 0 | 8 | 226 | 462 | 236 | 226 | 22 | 1 |
-- | Re-export of "GHC.Num" under the Rebase namespace; this module
-- defines nothing of its own.
module Rebase.GHC.Num
(
  module GHC.Num
)
where

import GHC.Num
| nikita-volkov/rebase | library/Rebase/GHC/Num.hs | mit | 65 | 0 | 5 | 12 | 20 | 13 | 7 | 4 | 0 |
module Hasql.Private.Session
where
import Hasql.Private.Prelude
import Hasql.Private.Errors
import qualified Database.PostgreSQL.LibPQ as LibPQ
import qualified Hasql.Private.Decoders.Results as Decoders.Results
import qualified Hasql.Private.Decoders.Result as Decoders.Result
import qualified Hasql.Private.Encoders.Params as Encoders.Params
import qualified Hasql.Private.Encoders as Encoders
import qualified Hasql.Private.Settings as Settings
import qualified Hasql.Private.IO as IO
import qualified Hasql.Statement as Statement
import qualified Hasql.Private.Connection as Connection
-- |
-- A batch of actions to be executed in the context of a database connection.
-- The stack: a reader supplying the connection, ExceptT for query
-- failures, IO at the base.
newtype Session a =
  Session (ReaderT Connection.Connection (ExceptT QueryError IO) a)
  deriving (Functor, Applicative, Monad, MonadError QueryError, MonadIO, MonadReader Connection.Connection)
-- |
-- Executes a bunch of commands on the provided connection.
-- Unwraps the 'Session' stack, surfacing any 'QueryError' as a 'Left'.
run :: Session a -> Connection.Connection -> IO (Either QueryError a)
run (Session impl) connection =
  runExceptT $
  runReaderT impl connection
-- |
-- Possibly a multi-statement query,
-- which however cannot be parameterized or prepared,
-- nor can any results of it be collected.
sql :: ByteString -> Session ()
sql sql =
  Session $ ReaderT $ \(Connection.Connection pqConnectionRef integerDatetimes registry) ->
  ExceptT $ fmap (mapLeft (QueryError sql [])) $ withMVar pqConnectionRef $ \pqConnection -> do
    -- Send the raw statement, then always drain the result queue so the
    -- connection is left clean; (*>) reports the first of the two errors.
    r1 <- IO.sendNonparametricStatement pqConnection sql
    r2 <- IO.getResults pqConnection integerDatetimes decoder
    return $ r1 *> r2
  where
    -- Expect exactly one result with no rows.
    decoder =
      Decoders.Results.single Decoders.Result.noResult
-- |
-- Parameters and a specification of a parametric single-statement query to apply them to.
statement :: params -> Statement.Statement params result -> Session result
statement input (Statement.Statement template (Encoders.Params paramsEncoder) decoder preparable) =
  Session $ ReaderT $ \(Connection.Connection pqConnectionRef integerDatetimes registry) ->
  ExceptT $ fmap (mapLeft (QueryError template inputReps)) $ withMVar pqConnectionRef $ \pqConnection -> do
    r1 <- IO.sendParametricStatement pqConnection integerDatetimes registry template paramsEncoder preparable input
    -- NOTE(review): unsafeCoerce bridges the public decoder type to the
    -- internal one; safe only while the two representations stay
    -- identical — re-check whenever either type changes.
    r2 <- IO.getResults pqConnection integerDatetimes (unsafeCoerce decoder)
    return $ r1 *> r2
  where
    -- Rendered parameter values, attached to any QueryError for diagnostics.
    inputReps =
      let
        Encoders.Params.Params (Op encoderOp) = paramsEncoder
        step (_, _, _, rendering) acc =
          rendering : acc
      in foldr step [] (encoderOp input)
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
module Calc where
import Control.Monad
import ExprT
import Parser
import StackVM
-- | Integer under max/min semantics: 'add' is 'max', 'mul' is 'min'.
newtype MinMax = MinMax Integer deriving (Eq, Show)
-- | Integer arithmetic modulo 7.
newtype Mod7 = Mod7 Integer deriving (Eq, Show)

-- | Abstract syntax of arithmetic expressions: an instance supplies
-- literals, addition and multiplication.
class Expr a where
  lit :: Integer -> a
  add :: a -> a -> a
  mul :: a -> a -> a
-- | The trivial instance: build the syntax tree itself.
instance Expr ExprT where
  lit = ExprT.Lit
  add = ExprT.Add
  mul = ExprT.Mul

-- | Direct evaluation to an Integer.
instance Expr Integer where
  lit = id
  add = (+)
  mul = (*)
-- | Boolean interpretation: positive literals are True, addition is
-- disjunction, multiplication is conjunction.
instance Expr Bool where
  lit = (> 0) -- replaces the redundant `if x <= 0 then False else True`
  add = (||)
  mul = (&&)
instance Expr MinMax where
  lit = MinMax
  add (MinMax x) (MinMax y) = lit (max x y)
  mul (MinMax x) (MinMax y) = lit (min x y)

instance Expr Mod7 where
  -- Normalise on construction so stored values stay in [0,6].
  lit x = Mod7 (x `mod` 7)
  -- Go through 'lit' so sums and products are reduced mod 7 as well.
  add (Mod7 x) (Mod7 y) = lit (x + y)
  mul (Mod7 x) (Mod7 y) = lit (x * y)
-- | Compile to a StackVM program: push literals, emit operator opcodes
-- after their operands (postfix order).
instance Expr Program where
  lit x = [PushI x]
  add x y = x ++ y ++ [StackVM.Add]
  mul x y = x ++ y ++ [StackVM.Mul]
-- | Evaluate an expression tree to an Integer.
eval :: ExprT -> Integer
eval expr = case expr of
  ExprT.Lit n   -> n
  ExprT.Add l r -> eval l + eval r
  ExprT.Mul l r -> eval l * eval r
-- | Parse and evaluate; 'Nothing' on a parse failure.
-- (The manual Maybe case was just 'fmap'.)
evalStr :: String -> Maybe Integer
evalStr = fmap eval . parseExp ExprT.Lit ExprT.Add ExprT.Mul
-- | Parse straight into any 'Expr' instance, here fixed to StackVM programs.
compile :: String -> Maybe Program
compile = parseExp lit add mul

-- test arithmetic expressions
testExp :: Expr a => Maybe a
testExp = parseExp lit add mul "(3 * -4) + 5"

testInteger = testExp :: Maybe Integer
testBool = testExp :: Maybe Bool
testMM = testExp :: Maybe MinMax
testSat = testExp :: Maybe Mod7

-- test StackVM expressions
-- the following expression should evaluate to Just (Right (IVal (-7)))
testCompile = liftM stackVM (compile "(3 * -4) + 5")
| limdauto/learning-haskell | cis194/week5/Calc.hs | mit | 1,752 | 0 | 10 | 435 | 699 | 368 | 331 | 54 | 2 |
module Reforest.Grammar where
import Data.List
import qualified Data.Set as Set
-- | O(n log n) deduplication via a Set. Note the output is sorted
-- ascending, not in first-occurrence order like 'Data.List.nub'.
nub' :: Ord a => [a] -> [a]
nub' xs = Set.toAscList (Set.fromList xs)
-- | Non-terminal: numeric index and arity.
data NonTerm = NT Int Int
  deriving (Eq, Ord)
instance Show NonTerm where
  show (NT i _) = "A" ++ show i

-- | Symbols: constructors with an arity, non-terminal occurrences, and
-- bound variables (indices of a production's parameters, see 'subst'').
data Sym = Con String Int | Var NonTerm | Bnd Int
  deriving (Eq, Ord)

-- | Number of arguments a symbol expects; bound variables take none.
arity :: Sym -> Int
arity (Con _ k) = k
arity (Var (NT _ k)) = k
arity (Bnd _) = 0

instance Show Sym where
  show (Con f _) = f
  show (Var a) = show a
  show (Bnd i) = "x" ++ show i

-- | First-order terms: a symbol applied to arguments.
data Term = App Sym [Term]
  deriving (Eq, Ord)

rootSym :: Term -> Sym
rootSym (App f _) = f

args :: Term -> [Term]
args (App _ xs) = xs

instance Show Term where
  show (App s []) = show s
  show (App s xs) = show s ++ "(" ++ intercalate "," (map show xs) ++ ")"

-- | A production A(x0..x{k-1}) -> t.
data Production = Prod NonTerm Term
  deriving (Eq, Ord)

lhs :: Production -> NonTerm
rhs :: Production -> Term
lhs (Prod a _) = a
rhs (Prod _ t) = t

instance Show Production where
  show (Prod a@(NT _ k) t) = show (App (Var a) [App (Bnd i) [] | i <- [0..k-1]]) ++ "->" ++ show t

-- | A grammar is a list of productions; NT 0 0 is the start symbol
-- (see 'genLang').
type Grammar = [Production]

-- | All productions with the given left-hand side.
prods :: NonTerm -> Grammar -> Grammar
prods a = filter ((== a) . lhs)

-- | Left-hand sides occurring in the grammar, deduplicated.
nonTerms :: Grammar -> [NonTerm]
nonTerms = nub' . map lhs

-- | Every way of choosing exactly one production per non-terminal.
projections :: Grammar -> [Grammar]
projections g = sequence [ prods a g | a <- nonTerms g ]
-- | The production for the non-terminal, provided it is defined
-- exactly once in the grammar.
unambDef :: Grammar -> NonTerm -> Maybe Production
unambDef g a
  | [p] <- prods a g = Just p
  | otherwise        = Nothing
-- | Substitute a production into a term: occurrences of the production's
-- non-terminal are replaced by its right-hand side, with the (already
-- substituted) arguments plugged in for the bound variables.
subst :: Production -> Term -> Term
subst p@(Prod a t) (App (Var a') xs)
  | a == a' = subst' (map (subst p) xs) t
subst p (App f xs) = App f (map (subst p) xs)

-- | Replace bound variable i by the i-th argument.
-- NOTE(review): (!!) is partial — assumes every Bnd index is below the
-- production's arity.
subst' :: [Term] -> Term -> Term
subst' as (App (Bnd i) []) = as !! i
subst' as (App f xs) = App f (map (subst' as) xs)

-- | Remove non-terminal @n@ by inlining its (unique) definition everywhere.
-- NOTE(review): the 'Just p' binding is partial — calling this when @n@
-- has zero or several productions crashes with a pattern-match failure.
elimDef :: NonTerm -> Grammar -> Grammar
elimDef n g = map elimDef' (filter ((/= n) . lhs) g)
  where elimDef' (Prod a t) = Prod a (subst p t)
        Just p = unambDef g n

-- | Language of @a@ in an unambiguous grammar: eliminate every other
-- non-terminal, then read off @a@'s right-hand sides.
genLang'' :: NonTerm -> Grammar -> [Term]
genLang'' a g = map rhs $ prods a $ foldr elimDef g (nonTerms g \\ [a])

-- | Language of @a@: the union over all unambiguous projections.
genLang' :: NonTerm -> Grammar -> [Term]
genLang' a g = nub' (projections g >>= genLang'' a)

-- | Language of the start symbol (NT 0 0).
genLang :: Grammar -> [Term]
genLang = genLang' (NT 0 0)
| gebner/reforest | src/Reforest/Grammar.hs | mit | 2,237 | 0 | 15 | 599 | 1,163 | 596 | 567 | 64 | 2 |
module Lesson03 where
import Helper
import System.Random.Shuffle
-- Let's make things a little more complicated. Instead of hard-coding
-- two players in our game, let's allow the user to specify how many
-- players to allow. In order to make this work, we're going to need
-- to define a helper function.
--
-- This function is going to take a number telling us how many players
-- there are, and the deck. Fortunately, defining a function that takes
-- two arguments is easy:
-- NOTE(review): a negative player count never reaches 0, so this loops
-- forever once the deck is exhausted (splitAt on [] keeps yielding []);
-- see exercise 1 below.
deal numberOfPlayers shuffled =
    -- We're going to use a technique called recursion, which means our
    -- function will end up calling itself. In order to make that work,
    -- we need to start off with the "base case," which is when there are
    -- no players. In that case, we want to tell the user that all hands
    -- have been dealt:
    if numberOfPlayers == 0
        then putStrLn "All hands dealt"
        -- If numberOfPlayers is not 0, then...
        else do
            -- Again, we'll split up our deck into the current hand and
            -- the rest of the deck using splitAt:
            let (hand, rest) = splitAt 5 shuffled
            -- Now we can print the current hand:
            print hand
            -- And now the fun part: we need to deal the rest of the players.
            -- In order to make this work, we need to call deal again, but
            -- this time with one less player, and with the new list of
            -- cards.
            deal (numberOfPlayers - 1) rest
-- Now that we've written our helper function, main should be easy:
-- NOTE(review): 'readLn' throws an exception on non-numeric input
-- (exercise 2 below).
main = do
    putStrLn "How many players?"
    numberOfPlayers <- readLn
    shuffled <- shuffleM deck
    deal numberOfPlayers shuffled
{-
Exercises:
1. What happens if you enter a negative number of players? Can you think of a way
to make the program behave better?
2. What happens if you enter an invalid number?
3. Can you make the program ask to deal another game after the first one finishes?
Hint: what you want it to do is, after it deals, start running main again.
4. Advanced: in the previous example, we printed the player number before each hand.
Can you do that here? Hint: you'll want to pass in an extra argument to deal
to give the player number, and add 1 to it each time you call deal again.
You'll also need to give an initial value when you call deal in main.
Behind the scenes:
The code above used an if/then/else statement, which is pretty common in many programming
languages. While this works in Haskell, it's not what most Haskellers would consider
pretty. Usually, Haskell favors something called "pattern matching." The easiest way to
explain that technique is to rewrite deal using it:
-}
-- Pattern-matching version of 'deal'. The base-case clause ignores the
-- deck (hence the _shuffled wildcard), and the recursive call goes to
-- deal2 itself — the original recursed into the old 'deal', which only
-- worked because the two functions happen to be equivalent.
deal2 0 _shuffled = putStrLn "All hands dealt"
deal2 numberOfPlayers shuffled = do
    let (hand, rest) = splitAt 5 shuffled
    print hand
    deal2 (numberOfPlayers - 1) rest
{-
The way pattern matching works is that Haskell tries to match the parameters
against the patterns, one at a time. If the number of players is 0, then
the first pattern (or first *clause*) matches, and we print "All hands dealt."
Otherwise, it moves onto the next clause. Both numberOfPlayers and shuffled are
variable names, and therefore count as "wildcard patterns," which will match anything.
-} | snoyberg/haskell-impatient-poker-players | src/Lesson03.hs | mit | 3,288 | 0 | 11 | 779 | 205 | 110 | 95 | 20 | 2 |
{-|
Module: Flaw.UI.Slider
Description: Slider is an element with value.
License: MIT
-}
module Flaw.UI.Slider
( Slider(..)
, newSlider
) where
import Control.Concurrent.STM
import Control.Monad
import Data.Maybe
import Flaw.Graphics.Canvas
import Flaw.Input.Keyboard
import Flaw.Input.Mouse
import Flaw.Math
import Flaw.UI
import Flaw.UI.Drawer
import Flaw.UI.Metrics
-- | A horizontal slider holding a Float value; interaction clamps the
-- value to [0, 1] (see 'processInputEvent').
data Slider = Slider
  { sliderValueVar :: !(TVar Float)
  , sliderSizeVar :: !(TVar Size)
  , sliderFocusedVar :: !(TVar Bool)
  -- ^ Nothing while the cursor is outside the element.
  , sliderLastMousePositionVar :: !(TVar (Maybe Position))
  -- ^ Drag origin: position and value at the moment of the press.
  , sliderFirstPressedPositionValueVar :: !(TVar (Maybe (Position, Float)))
  -- ^ Action run whenever interaction changes the value.
  , sliderChangeHandlerVar :: !(TVar (STM ()))
  , sliderPieceWidth :: {-# UNPACK #-} !Metric
  , sliderValueStep :: {-# UNPACK #-} !Float
  }

-- | Create a slider with the given keyboard step: initial value 0,
-- zero size, unfocused, no change handler.
newSlider :: Metrics -> Float -> STM Slider
newSlider Metrics
  { metricsSliderPieceWidth = pieceWidth
  } valueStep = do
  valueVar <- newTVar 0
  sizeVar <- newTVar $ Vec2 0 0
  focusedVar <- newTVar False
  lastMousePositionVar <- newTVar Nothing
  firstPressedPositionValueVar <- newTVar Nothing
  changeHandlerVar <- newTVar $ return ()
  return Slider
    { sliderValueVar = valueVar
    , sliderSizeVar = sizeVar
    , sliderFocusedVar = focusedVar
    , sliderLastMousePositionVar = lastMousePositionVar
    , sliderFirstPressedPositionValueVar = firstPressedPositionValueVar
    , sliderChangeHandlerVar = changeHandlerVar
    , sliderPieceWidth = pieceWidth
    , sliderValueStep = valueStep
    }
instance Element Slider where

  layoutElement Slider
    { sliderSizeVar = sizeVar
    } = writeTVar sizeVar

  -- Hit test against the element's rectangle.
  dabElement Slider
    { sliderSizeVar = sizeVar
    } (Vec2 x y) =
    if x < 0 || y < 0 then return False
    else do
      size <- readTVar sizeVar
      let Vec2 sx sy = size
      return $ x < sx && y < sy

  -- Draw a lowered groove across the middle and a raised knob whose
  -- horizontal position encodes the current value.
  renderElement Slider
    { sliderValueVar = valueVar
    , sliderSizeVar = sizeVar
    , sliderFocusedVar = focusedVar
    , sliderLastMousePositionVar = lastMousePositionVar
    , sliderFirstPressedPositionValueVar = firstPressedPositionValueVar
    , sliderPieceWidth = pieceWidth
    } Drawer
    { drawerCanvas = canvas
    , drawerStyles = DrawerStyles
      { drawerLoweredStyleVariant = loweredStyleVariant
      , drawerRaisedStyleVariant = raisedStyleVariant
      }
    } (Vec2 px py) = do
    -- get state
    value <- readTVar valueVar
    size <- readTVar sizeVar
    let Vec2 sx sy = size
    focused <- readTVar focusedVar
    moused <- isJust <$> readTVar lastMousePositionVar
    pressed <- isJust <$> readTVar firstPressedPositionValueVar
    -- get style: pressed wins over moused/focused, which win over normal
    let
      styleVariant
        | pressed = styleVariantPressedStyle
        | moused || focused = styleVariantMousedStyle
        | otherwise = styleVariantNormalStyle
      loweredStyle = styleVariant loweredStyleVariant
      raisedStyle = styleVariant raisedStyleVariant
    -- return rendering
    return $ do
      let my = py + sy `quot` 2
      drawBorderedRectangle canvas
        (Vec4 px (px + 1) (px + sx - 1) (px + sx))
        (Vec4 (my - 2) (my - 1) (my + 1) (my + 2))
        (styleFillColor loweredStyle) (styleBorderColor loweredStyle)
      -- knob offset: value scaled over the travel range (sx - pieceWidth)
      let x = px + floor (value * fromIntegral (sx - pieceWidth))
      drawBorderedRectangle canvas
        (Vec4 x (x + 1) (x + pieceWidth - 1) (x + pieceWidth))
        (Vec4 py (py + 1) (py + sy - 1) (py + sy))
        (styleFillColor raisedStyle) (styleBorderColor raisedStyle)

  -- Keyboard steps the value; mouse press either grabs the knob or jumps
  -- to the clicked position; drag tracks the cursor relative to the press.
  processInputEvent Slider
    { sliderValueVar = valueVar
    , sliderSizeVar = sizeVar
    , sliderLastMousePositionVar = lastMousePositionVar
    , sliderFirstPressedPositionValueVar = firstPressedPositionValueVar
    , sliderChangeHandlerVar = changeHandlerVar
    , sliderPieceWidth = pieceWidth
    , sliderValueStep = valueStep
    } inputEvent _inputState = case inputEvent of
    KeyboardInputEvent keyboardEvent -> case keyboardEvent of
      KeyDownEvent KeyLeft -> do
        value <- readTVar valueVar
        changeValue $ max 0 $ value - valueStep
        return True
      KeyDownEvent KeyRight -> do
        value <- readTVar valueVar
        changeValue $ min 1 $ value + valueStep
        return True
      _ -> return False
    MouseInputEvent mouseEvent -> case mouseEvent of
      MouseDownEvent LeftMouseButton -> do
        maybeLastMousePosition <- readTVar lastMousePositionVar
        writeTVar firstPressedPositionValueVar =<< case maybeLastMousePosition of
          Just lastMousePosition@(Vec2 x _y) -> do
            firstValue <- readTVar valueVar
            -- check if click hit slider
            size <- readTVar sizeVar
            let
              Vec2 sx _sy = size
              l = floor $ firstValue * fromIntegral (sx - pieceWidth)
            if x >= l && x < l + pieceWidth then return $ Just (lastMousePosition, firstValue)
            else do
              -- click outside the knob: centre the knob on the click
              let newValue = max 0 $ min 1 $ fromIntegral (x - pieceWidth `quot` 2) / fromIntegral (sx - pieceWidth)
              changeValue newValue
              return $ Just (lastMousePosition, newValue)
          Nothing -> return Nothing
        return True
      MouseUpEvent LeftMouseButton -> do
        writeTVar firstPressedPositionValueVar Nothing
        return True
      CursorMoveEvent x y -> do
        maybeFirstPressedPositionValue <- readTVar firstPressedPositionValueVar
        case maybeFirstPressedPositionValue of
          Just (Vec2 fx _fy, firstValue) -> do
            size <- readTVar sizeVar
            let Vec2 sx _sy = size
            changeValue $ max 0 $ min 1 $ firstValue + fromIntegral (x - fx) / fromIntegral (sx - pieceWidth)
          Nothing -> return ()
        writeTVar lastMousePositionVar $ Just $ Vec2 x y
        return True
      _ -> return False
    MouseLeaveEvent -> do
      writeTVar lastMousePositionVar Nothing
      writeTVar firstPressedPositionValueVar Nothing
      return True
    where
      -- Store the new value and fire the registered change handler.
      changeValue newValue = do
        writeTVar valueVar newValue
        join $ readTVar changeHandlerVar

  focusElement Slider
    { sliderFocusedVar = focusedVar
    } = do
    writeTVar focusedVar True
    return True

  unfocusElement Slider
    { sliderFocusedVar = focusedVar
    } = writeTVar focusedVar False
-- NOTE(review): setFloatValue does not fire the change handler; only user
-- interaction does (see changeValue in processInputEvent).
instance HasFloatValue Slider where
  setFloatValue Slider
    { sliderValueVar = valueVar
    } = writeTVar valueVar
  getFloatValue Slider
    { sliderValueVar = valueVar
    } = readTVar valueVar

instance HasChangeHandler Slider where
  setChangeHandler Slider
    { sliderChangeHandlerVar = changeHandlerVar
    } = writeTVar changeHandlerVar

instance HasPreferredSize Slider where
  preferredSize Metrics
    { metricsMainWidth = width
    , metricsSliderHeight = height
    } _ = Vec2 width height
| quyse/flaw | flaw-ui/Flaw/UI/Slider.hs | mit | 6,705 | 8 | 30 | 1,698 | 1,640 | 841 | 799 | 183 | 1 |
#!/usr/bin/env runhaskell
import Test.Tasty
import Test.Tasty.SmallCheck as SC
import Test.Tasty.QuickCheck as QC
import Test.Tasty.HUnit
import Data.List
import qualified Text.GraphQL.Schema as GQ
import qualified Text.GraphQL.Schema.Graphene as Graphene
-- | Property groups, checked by both SmallCheck and QuickCheck.
properties :: TestTree
properties = testGroup "Properties" [scProps, qcProps]

scProps :: TestTree
scProps = testGroup "(checked by SmallCheck)"
  [ SC.testProperty "sort == sort . reverse" $
      \list -> sort (list :: [Int]) == sort (reverse list)
  , SC.testProperty "Fermat's little theorem" $
      \x -> ((x :: Integer)^(7 :: Integer) - x) `mod` 7 == 0
  -- the following property does not hold
  --, SC.testProperty "Fermat's last theorem" $
  --    \x y z n ->
  --      (n :: Integer) >= 3 SC.==> x^n + y^n /= (z^n :: Integer)
  ]

qcProps :: TestTree
qcProps = testGroup "(checked by QuickCheck)"
  [ QC.testProperty "sort == sort . reverse" $
      \list -> sort (list :: [Int]) == sort (reverse list)
  , QC.testProperty "Fermat's little theorem" $
      \x -> ((x :: Integer)^(7 :: Integer) - x) `mod` 7 == 0
  -- the following property does not hold
  -- , QC.testProperty "Fermat's last theorem" $
  --     \x y z n ->
  --       (n :: Integer) >= 3 QC.==> x^n + y^n /= (z^n :: Integer)
  ]
-- | HUnit cases: Eq sanity checks for every schema type, plus two
-- smoke/round-trip tests of the Graphene (Python) renderer.
unitTests :: TestTree
unitTests = testGroup "Unit tests"
  [ testCase "IntType equality" $
      GQ.IntType @?= GQ.IntType
  , testCase "FloatType equality" $
      GQ.FloatType @?= GQ.FloatType
  , testCase "BooleanType equality" $
      GQ.BooleanType @?= GQ.BooleanType
  , testCase "IDType equality" $
      GQ.IDType @?= GQ.IDType
  , testCase "StringType equality" $
      GQ.StringType @?= GQ.StringType
  , testCase "NonNull equality" $
      GQ.NonNull GQ.StringType @?= GQ.NonNull GQ.StringType
  , testCase "List equality" $
      GQ.ListType (GQ.NonNull GQ.FloatType) @?=
        GQ.ListType (GQ.NonNull GQ.FloatType)
  , testCase "EnumValue equality" $
      GQ.EnumValue { GQ.evName="", GQ.evValue="1" } @?=
        GQ.EnumValue { GQ.evName="", GQ.evValue="1" }
  , testCase "Enum equality" $
      GQ.EnumType GQ.EnumDef { GQ.enumName="", GQ.enumValues=[] } @?=
        GQ.EnumType GQ.EnumDef { GQ.enumName="", GQ.enumValues=[] }
  , testCase "ObjectField equality" $
      GQ.ObjectField { GQ.fieldName="", GQ.fieldType=GQ.StringType } @?=
        GQ.ObjectField { GQ.fieldName="", GQ.fieldType=GQ.StringType }
  , testCase "Graphene Render Smoke" $
      Graphene.render GQ.ObjectType {
        GQ.objName = "MyObj",
        GQ.objFields = [GQ.ObjectField{
          GQ.fieldName = "first_name",
          GQ.fieldType = GQ.StringType
        }]
      }
      @?=
      ("class MyObj(graphene.ObjectType):\n" ++
       "    first_name = graphene.String()")
  , testCase "Graphene Render Full" $
      Graphene.render GQ.ObjectType {
        GQ.objName = "MyObj",
        GQ.objFields = [
          GQ.ObjectField{
            GQ.fieldName = "a_float",
            GQ.fieldType = GQ.FloatType
          },
          GQ.ObjectField{
            GQ.fieldName = "a_int",
            GQ.fieldType = GQ.IntType
          },
          GQ.ObjectField{
            GQ.fieldName = "a_bool",
            GQ.fieldType = GQ.BooleanType
          }
        ]
      }
      @?=
      ("class MyObj(graphene.ObjectType):\n" ++
       "    a_float = graphene.Float()\n" ++
       "    a_int = graphene.Int()\n" ++
       "    a_bool = graphene.Boolean()")
  -- , testCase "PersonType example" $
  --     Object ObjectFields {
  --       objName = "Person",
  --       objFields = [
  --         ObjectField {
  --           fieldName = "name",
  --           fieldType = String
  --         },
  --         ObjectField {
  --           fieldName = "bestFriend",
  --           fieldType = String
  --         }
  --       ]
  --     }
  -- the following test does not hold
  --, testCase "List comparison (same length)" $
  --    [1, 2, 3] `compare` [1,2,2] @?= LT
  ]
-- | Top-level tree combining properties and unit tests.
tests :: TestTree
tests = testGroup "Tests" [properties, unitTests]

main :: IO ()
main = defaultMain tests
| wtanaka/graphql-schema | tests/Spec.hs | gpl-2.0 | 4,129 | 0 | 14 | 1,197 | 912 | 516 | 396 | 78 | 1 |
module Tema_21d_PolPropiedades_Spec (main, spec) where
import Tema_21.PolPropiedades
import Test.Hspec
import Test.QuickCheck
main :: IO ()
main = hspec spec

-- | QuickCheck properties p1..p6 from Tema_21.PolPropiedades, run under
-- hspec.
spec :: Spec
spec = do
  describe "Propiedades de los polinomios" $ do
    it "p1" $
      property prop_polCero_es_cero
    it "p2" $
      property prop_consPol_no_cero
    it "p3" $
      property prop_consPol
    it "p4" $
      property prop_grado
    it "p5" $
      property prop_coefLider
    it "p6" $
      property prop_restoPol
| jaalonso/I1M-Cod-Temas | test/Tema_21d_PolPropiedades_Spec.hs | gpl-2.0 | 501 | 0 | 11 | 127 | 143 | 66 | 77 | 21 | 1 |
-- | Minimal LIFO stack.
data Stack a = Nil
             | Stack a (Stack a)

push :: Stack a -> a -> Stack a
push s x = Stack x s

-- | Pop the top element.
-- NOTE(review): partial — pattern-match failure on Nil. Unused by 'calc',
-- which pattern-matches the stack directly.
pop :: Stack a -> (a, Stack a)
pop (Stack x s) = (x, s)

-- | Lexical token of a postfix expression: an integer literal or one of
-- the operators '+', '-', '*'.
data Token = Number Integer
           | Operation Char
-- | Read one whitespace-delimited word as a token; any word that is not
-- a known operator is parsed as an integer literal ('read' is partial on
-- garbage, as before).
parseToken :: String -> Token
parseToken w = case w of
  "+" -> Operation '+'
  "-" -> Operation '-'
  "*" -> Operation '*'
  _   -> Number (read w)
-- | Evaluate a postfix token stream with an explicit stack.
-- NOTE(review): partial on malformed input — an operator with fewer than
-- two operands, or an empty final stack, is a pattern-match failure.
calc :: [Token] -> Integer
calc = (`calc'` Nil)
  where
    calc' :: [Token] -> Stack Integer -> Integer
    -- End of input: the answer is the top of the stack.
    calc' [] (Stack x _) = x
    -- Operator: pop y (top) then x, push x `op` y.
    calc' ((Operation c):cs)
          (Stack y (Stack x s))
      | c == '+' = calc' cs (push s (x + y))
      | c == '-' = calc' cs (push s (x - y))
      | c == '*' = calc' cs (push s (x * y))
      | otherwise = error "No such operation"
    calc' ((Number x):cs) s = calc' cs (push s x)
-- | Base name of the input/output files (postfix.in / postfix.out).
fileName :: String
fileName = "postfix"

-- | Like 'interact' but over files: read <fileName>.in, apply the
-- transformation, write <fileName>.out.
-- NOTE(review): 'readFile' is lazy; the input handle stays open until the
-- output write forces the contents.
interactFiles :: (String -> String) -> IO()
interactFiles f = do
  inp <- readFile $ fileName ++ ".in"
  writeFile (fileName ++ ".out") (f inp)
main = interactFiles $ show . calc . (map parseToken) . words
| zakharvoit/discrete-math-labs | Season2/LinearDataStructures/Postfix/Postfix.hs | gpl-3.0 | 1,068 | 0 | 12 | 309 | 519 | 263 | 256 | 31 | 3 |
{-# LANGUAGE TemplateHaskell, FunctionalDependencies, FlexibleInstances, RankNTypes #-}
{-# OPTIONS_GHC -funbox-strict-fields #-}
module Model where
import qualified Data.DList as DL
import qualified Data.Vector.Storable as S
import Graphics.Rendering.OpenGL.Raw
import Control.Monad.State.Strict
import Control.Lens
import Geometry
import OBJ
-- | A face is a triangle, defined by indices in _vertices, _normals and/or _uvs
-- When a tuple is used, the order of indices is the same as the order of the compounded
-- words for the constructor name.
--
-- As per .obj, indices start at 1, and /NOT/ 0.
data Face
  -- | "f a b c d" in a .obj, corresponds to FV
  = Verts !I !I !I
  -- | "f a/a b/b c/c d/d" in a .obj, corresponds to FVT
  | VertTex !(I,I) !(I,I) !(I,I)
  -- | "f a//a b//b c//c d//d" in a .obj, corresponds to FVN
  | VertNorm !(I,I) !(I,I) !(I,I)
  -- | "f a/a/a b/b/b c/c/c d/d/d" in a .obj, corresponds to FVTN
  | VertTexNorm !(I,I,I) !(I,I,I) !(I,I,I)

-- | Generic 'Model' type.
-- The container @f@ is a difference list while building ('Builder') and a
-- plain list once finished ('Built').
data ModelT f = Model
  { _mVertices :: !(f V3)
  , _mNormals :: !(f V3)
  , _mUvs :: !(f V2)
  , _mFaces :: !(f Face)
  }

-- | Flat, GPU-ready mesh: packed storable attribute vectors plus an
-- index buffer (see 'builtToMesh').
data Mesh = Mesh
  { _gVerts :: !V
  , _gNorms :: !V
  , _gUvs :: !V
  , _gFaces :: !(Vector GLushort)
  } deriving (Eq,Show,Read)

-- Generate field lenses for both records.
makeFields ''ModelT
makeFields ''Mesh

--------------------------------------------------------------------------------
-- Building models from .obj

-- | 'Builder' is an efficient type used to /build/ a mesh from a list of .obj commands
-- (DList gives O(1) append while accumulating).
type Builder = State (ModelT DL.DList) ()
type Built = ModelT []

emptyDListModel :: ModelT DL.DList
emptyDListModel = Model DL.empty DL.empty DL.empty DL.empty
-- | Fold one .obj command into the model being built; unrecognised
-- commands are silently ignored.
addObjCommand :: ObjCommand -> Builder
addObjCommand obj = case obj of
  V x y z -> addTo vertices (vec3 x y z)
  VN x y z -> addTo normals (vec3 x y z)
  VT x y -> addTo uvs (vec2 x y)
  FV a b c -> addTo faces (Verts a b c)
  FVT a b c -> addTo faces (VertTex a b c)
  FVN a b c -> addTo faces (VertNorm a b c)
  FVTN a b c -> addTo faces (VertTexNorm a b c)
  _ -> return ()
  where
    -- Append to the selected field; DList snoc is O(1).
    addTo :: Lens' (ModelT DL.DList) (DL.DList a) -> a -> Builder
    addTo label a = label %= (`DL.snoc` a)

-- | Run a 'Builder' from the empty model and freeze the difference lists
-- into plain lists.
runBuilder :: Builder -> Built
runBuilder b = case execState b emptyDListModel of
  Model v n u f -> Model (DL.toList v) (DL.toList n) (DL.toList u) (DL.toList f)

-- | The three vertex indices of a face, converted from .obj's 1-based
-- indexing to 0-based; texture/normal indices are dropped.
faceToIndices :: Face -> [GLushort]
faceToIndices = map (subtract 1 . fromIntegral) . toIndices
  where
    toIndices (Verts x y z) = [x,y,z]
    toIndices (VertTex (x,_) (y,_) (z,_)) = [x,y,z]
    toIndices (VertNorm (x,_) (y,_) (z,_)) = [x,y,z]
    toIndices (VertTexNorm (x,_,_) (y,_,_) (z,_,_)) = [x,y,z]

-- | Flatten a built model into packed vectors plus an index buffer.
builtToMesh :: Built -> Mesh
builtToMesh (Model v n u f) = Mesh
  (S.concat v)
  (S.concat n)
  (S.concat u)
  (S.fromList (concatMap faceToIndices f))

-- | Parse a .obj file and build its 'Mesh'.
-- NOTE(review): the 'Right obj' bind is partial — a parse failure raises
-- a pattern-match IOError rather than a descriptive message.
loadMesh :: FilePath -> IO Mesh
loadMesh path = do
  Right obj <- readObj path
  return $! builtToMesh . runBuilder $! forM_ obj addObjCommand
| mikeplus64/plissken | src/Model.hs | gpl-3.0 | 3,140 | 0 | 11 | 817 | 1,078 | 577 | 501 | 103 | 8 |
{-# LANGUAGE TemplateHaskell #-}
-- | The themes/ config format
module Lamdu.Config.Theme.Name where
import qualified Control.Lens as Lens
import qualified Data.Aeson.TH.Extended as JsonTH
import Data.Vector.Vector2 (Vector2)
import qualified GUI.Momentu.Draw as Draw
import Lamdu.Prelude
-- | Theme settings for rendering names: background colours for the two
-- kinds of collision suffix, and the scale applied to suffix text.
data Name = Name
    { _tagCollisionSuffixBGColor :: Draw.Color
    , _textCollisionSuffixBGColor :: Draw.Color
    , _collisionSuffixScaleFactor :: Vector2 Double
    } deriving (Eq, Show, Generic)

-- JSON instances with the leading underscore stripped from field names.
JsonTH.derivePrefixed "_" ''Name

Lens.makeLenses ''Name
| Peaker/lamdu | src/Lamdu/Config/Theme/Name.hs | gpl-3.0 | 568 | 0 | 9 | 100 | 124 | 76 | 48 | 14 | 0 |
module Moonbase.Theme
( Color
, color_
, FontAttr(..)
, Font(..)
, bold, italic
, size, sans, monospace, droid, droidMono
, Theme(..)
, Style(..)
, fg, font, bg
, defaultTheme
, defaultColor
) where
import Control.Applicative
import Data.Char
-- * Color & helpers

-- | A simple hex based representation of a color
-- following the syntax #rrggbbaa. Html uses it as well.
-- NOTE(review): a bare String alias gives no type safety; a newtype with
-- a validating smart constructor would prevent raw invalid values.
type Color = String
-- | checks if a color is valid. This function returns magenta if a invalid color was entered.
-- for Example:
-- >>> color_ "#ffffff"
-- "#ffffff"
-- >>> color_ "#fff"
-- "#ffffff"
-- >>> color_ "#abcxef"
-- "#ff00ff"
-- | Validate a colour, returning magenta ("#ff00ff") for anything
-- invalid. Fix: the short "#rgb" form previously skipped hex validation
-- entirely, so e.g. "#xyz" expanded to the invalid "#xxyyzz" instead of
-- falling back to magenta as documented above.
color_ :: Color -> Color
color_ ['#', r, g, b] = case checkHex [r, g, b] of
  Just _  -> ['#', r, r, g, g, b, b]
  Nothing -> "#ff00ff"
color_ ['#', r1, r2, g1, g2, b1, b2] = case checkHex [r1, r2, g1, g2, b1, b2] of
  Just x  -> '#' : x
  Nothing -> "#ff00ff"
color_ _ = "#ff00ff"
-- | Accept the string unchanged when every character is a hex digit;
-- Nothing otherwise.
checkHex :: String -> Maybe String
checkHex s
  | all isHexDigit s = Just s
  | otherwise        = Nothing
-- * Fonts & helpers

-- | font settings
data FontAttr = Default   -- ^ No attribute at all, just plain text
              | Bold      -- ^ Bold text
              | Thin      -- ^ More thin than normal
              | Thick     -- ^ Something between bold and normal
              | Italic    -- ^ Italic font
              | Underline -- ^ Underline the font
              deriving (Show, Eq)

-- | Font definition
-- Each font has to be a explicit name to match. Size can be selected and attributes added
-- You can generate your own Font definitions by using the constructor.
-- e.g
-- >>> Font "Droid Sans" 12 []
-- >>> Font "Droid Sans Mono" 12 [Thin, Italic]
data Font = Font
  { fontName :: String        -- ^ the name of the font
  , fontSize :: Int           -- ^ size of the font in px
  , fontAttrs :: [FontAttr] } -- ^ attributes how the font should be displayed
  deriving (Show, Eq)
-- | Mark a font as bold.
bold :: Font -> Font
bold fnt = fnt { fontAttrs = Bold : fontAttrs fnt }

-- | Mark a font as italic.
italic :: Font -> Font
italic fnt = fnt { fontAttrs = Italic : fontAttrs fnt }

-- | Set the font size in px. (Local names renamed so the parameter no
-- longer shadows the top-level 'font' accessor.)
size :: Int -> Font -> Font
size px fnt = fnt { fontSize = px }
-- * Predefined fonts

-- | fallback sans
sans :: Font
sans = Font "Sans" 12 []

-- | fallback monospace
monospace :: Font
monospace = Font "Monospace" 12 []

-- | droid sans
droid :: Font
droid = Font "Droid Sans" 12 []

-- | droid sans mono
droidMono :: Font
droidMono = Font "Droid Sans Mono" 12 []
-- * Theme
-- All the basic theming of moonbase works with a Theme. You define your own theme or use the default one.
-- @
-- blackBackground :: Color
-- blackBackground = "#000"
--
-- myTheme :: Theme
-- myTheme = Theme
-- { normal = Style "#fff" sans blackBackground
-- , highlight = Style "#f00" sans blackBackground
-- , active = Style "#0f0" sans blackBackground
-- , disabled = Style "#151515" sans blackBackground
-- , frame = Style "#0f0" sans blackBackground }
--
-- main :: IO ()
-- main = moonbase $ do
-- withTheme myTheme
-- ...
-- @
-- | To make it more easy all different modes come with a triple of settings
-- A foreground, font and a background color.
data Style = Style Color Font Color
  deriving (Show, Eq)

-- | get the foreground
fg :: Style -> Color
fg (Style c _ _) = c

-- | get the font definition
font :: Style -> Font
font (Style _ f _) = f

-- | get the background color
bg :: Style -> Color
bg (Style _ _ c) = c
-- TODO: Add a map which value is used where
-- | Minimal theme used to style everything if not set otherwise
data Theme = Theme
  { normal :: Style    -- ^ when everything is normal
  , highlight :: Style -- ^ Need to highlight something or a message
  , active :: Style    -- ^ If something is active (e.g the border of xmonad)
  , disabled :: Style  -- ^ if something is disabled
  , frame :: Style }   -- ^ the frame of something (e.g bar)
  deriving (Show, Eq)
-- | moonbase default theme
-- All five modes share one dark background; modes differ in foreground
-- colour and font attributes.
defaultTheme :: Theme
defaultTheme = Theme
  { normal = Style "#ffffff" sans defaultBg
  , highlight = Style "#268BD2" (bold sans) defaultBg
  , active = Style "#9ec400" sans defaultBg
  , disabled = Style "#808080" (italic sans) defaultBg
  , frame = Style "#151515" sans defaultBg }
  where
    defaultBg = "#242424"

-- | the default fallback color: magenta
defaultColor :: Color
defaultColor = "#ff00ff"
| felixsch/moonbase | src/Moonbase/Theme.hs | gpl-3.0 | 4,571 | 0 | 9 | 1,284 | 863 | 521 | 342 | 77 | 2 |
--------------------------------------------------------------------------------
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RankNTypes #-}
--------------------------------------------------------------------------------
-- | Defines colours used in the XMonad and XMobar configurations.
module Utils.Color where
--------------------------------------------------------------------------------
import Data.String
--------------------------------------------------------------------------------
-- | A color literal usable at any 'IsString' type (e.g. 'String' or @Text@);
-- the RankNTypes @forall@ keeps each constant polymorphic at its use site.
type Color = forall a. IsString a => a
--------------------------------------------------------------------------------
-- | Default (near-white) background color.
background :: Color
background = "#fefefe"
-- | Background of inactive elements; identical to the plain 'background'.
backgroundInactive :: Color
backgroundInactive = background
-- | Background of the active element (red).
backgroundActive :: Color
backgroundActive = "#ca3435"
--------------------------------------------------------------------------------
-- | Regular text color (near-black).
textRegular :: Color
textRegular = "#050505"
-- | Text color for warnings (orange).
textWarning :: Color
textWarning = "#e77200"
-- | Text color for alerts (dark red).
textAlert :: Color
textAlert = "#962727"
-- | Focused text swaps the regular palette: dark background, light foreground.
textFocusedBg :: Color
textFocusedBg = textRegular
textFocusedFg :: Color
textFocusedFg = background
-- | NOTE(review): empty string for the title background — presumably the
-- consumer (e.g. xmobar) treats "" as "no explicit background"; confirm.
textTitleBg :: Color
textTitleBg = ""
-- | Title text color (blue).
textTitleFg :: Color
textTitleFg = "#4848ff"
--------------------------------------------------------------------------------
-- Icon palette: every variant keeps the regular (near-black) foreground and
-- only changes the background tint.
iconStaticBg :: Color
iconStaticBg = "#f3fcf3"
iconStaticFg :: Color
iconStaticFg = textRegular
-- | Inactive icons blend into the plain background.
iconInactiveBg :: Color
iconInactiveBg = background
iconInactiveFg :: Color
iconInactiveFg = textRegular
-- | Active icons get a green tint.
iconActiveBg :: Color
iconActiveBg = "#bfdec8"
iconActiveFg :: Color
iconActiveFg = textRegular
-- | Alerting icons get a pale red tint.
iconAlertBg :: Color
iconAlertBg = "#fef0f0"
iconAlertFg :: Color
iconAlertFg = textRegular
--------------------------------------------------------------------------------
-- | Popup background (pale green, same as 'iconStaticBg').
popupBg :: Color
popupBg = "#f3fcf3"
-- | Popup foreground (near-black, same value as 'textRegular').
popupFg :: Color
popupFg = "#050505"
--------------------------------------------------------------------------------
-- source: d12frosted/environment | xmonad/lib/Utils/Color.hs | gpl-3.0
{-# LANGUAGE TypeSynonymInstances, MultiParamTypeClasses, FlexibleInstances #-}
{- | We implement real numbers as the completion of dyadic intervals. The whole construction is
parametrized by an approximate field, an example of which is "Dyadic".
-}
module Reals where
import Data.Ratio
import Staged
import Space
import Dyadic
import Interval
-- | A real number is implemented as a staged dyadic interval @'Interval' q@ where @q@ is the
-- underlying approximate field (in practice these are dyadic rationals). @'RealNum' q@ can be used
-- to represent not only real numbers but also the elements of the interval domain, including the
-- back-to-front intervals.
type RealNum q = Staged (Interval q)
-- | A deliberately simple 'Show' instance: take the approximation at
-- precision 20 (rounding down) and print it as an interval, followed by a
-- floating-point rendering of its midpoint.
instance ApproximateField q => Show (RealNum q) where
  show x = show i ++ " " ++ show (toFloat m)
    where i = approximate x (prec RoundDown 20)
          m = midpoint (lower i) (upper i)
-- | Linear order on real numbers.
instance IntervalDomain q => LinearOrder (RealNum q) where
    -- The stage argument passed by 'lift2' is ignored; 'iless' compares the
    -- staged intervals directly.
    less = lift2 (\_ -> iless)
-- | It is a bad idea to use Haskell-style inequality @/=@ on reals because it either returns @True@
-- or it diverges. Similarly, using Haskell equality @==@ is bad. Nevertheless, we define @==@ and @/=@
-- because Haskell wants them for numeric types.
instance IntervalDomain q => Eq (RealNum q) where
    -- (==) is left at its Prelude default, @not (x /= y)@, so comparing two
    -- equal reals never terminates.
    x /= y = force $ x `apart` y
-- | Reals are an 'Ord' instance in form only: 'compare' never returns @EQ@
-- (on equal arguments it diverges instead), because equality of reals is
-- undecidable.
instance IntervalDomain q => Ord (RealNum q) where
    compare x y = if force (x `less` y) then LT else GT
-- | The ring structure of the reals.  Arithmetic is lifted pointwise to
-- staged intervals; 'signum' and 'fromInteger' consult the current stage so
-- that the lower and upper endpoints are rounded in opposite directions
-- (via 'anti').
instance (ApproximateField q, IntervalDomain q) => Num (RealNum q) where
    x + y = lift2 iadd x y
    x - y = lift2 isub x y
    x * y = lift2 imul x y
    abs x = lift1 iabs x
    signum x = do i <- x
                  s <- get_stage
                  return $ Interval { lower = app_signum s (lower i),
                                      upper = app_signum (anti s) (upper i) }
    fromInteger k = do s <- get_stage
                       return $ Interval { lower = app_fromInteger s k,
                                           upper = app_fromInteger (anti s) k }
-- | Division and reciprocals.
instance (ApproximateField q, IntervalDomain q) => Fractional (RealNum q) where
    x / y = lift2 idiv x y
    recip x = lift1 iinv x
    -- Rationals embed as numerator / denominator, each via 'fromInteger'.
    fromRational r = fromInteger (numerator r) / fromInteger (denominator r)
-- | The Hausdorff property: two reals are apart iff one is strictly below
-- the other, in either order ('sor' is the parallel disjunction).
instance IntervalDomain q => Hausdorff (RealNum q) where
     x `apart` y = (x `less` y) `sor` (y `less` x)
-- | The value @ClosedInterval(a,b)@ represents the closed interval [a,b] as a subspace of the reals.
-- The pair is (lower, upper); nothing here enforces @a <= b@.
newtype ClosedInterval q = ClosedInterval (q, q)
-- | Compactness of the closed interval: universal quantification of a
-- predicate over [a,b] by adaptive bisection up to the stage's precision.
instance IntervalDomain q => Compact (ClosedInterval q) (RealNum q) where
   forall (ClosedInterval(a,b)) p =
     limit (\s ->
       let r = rounding s
           n = precision s
           -- Test value for the predicate: rounding down uses the whole
           -- subinterval, rounding up just its midpoint.
           test_interval u v = case r of
                                 RoundDown -> Interval {lower = u, upper = v}
                                 RoundUp -> let w = midpoint u v in Interval {lower = w, upper = w}
           -- Breadth-first sweep over subintervals; k is the bisection depth.
           -- NOTE(review): the (a,b) bound here shadows the outer endpoints.
           sweep [] = True
           sweep ((k,a,b):lst) = let x = return $ test_interval a b
                                 in case (r, approximate (p x) (prec r k)) of
                                      (RoundDown, False) -> (k < n) &&
                                           (let c = midpoint a b in sweep (lst ++ [(k+1,a,c), (k+1,c,b)]))
                                      (RoundDown, True) -> sweep lst
                                      (RoundUp, False) -> False
                                      (RoundUp, True) -> (k >= n) ||
                                           (let c = midpoint a b in sweep (lst ++ [(k+1,a,c), (k+1,c,b)]))
       in sweep [(0,a,b)]
       )
-- | Overtness of the closed interval [a,b]: existential quantification of a
-- predicate over [a,b] by adaptive bisection, dual to the 'Compact'
-- instance above.
instance IntervalDomain q => Overt (ClosedInterval q) (RealNum q) where
   exists (ClosedInterval (a,b)) p =
     limit (\s ->
       let r = rounding s
           n = precision s
           -- Test value for the predicate: rounding up uses the
           -- back-to-front subinterval, rounding down just its midpoint.
           test_interval u v = case r of
                                 RoundUp -> Interval {lower = v, upper = u}
                                 RoundDown -> let w = midpoint u v in Interval {lower = w, upper = w}
           -- Breadth-first sweep over subintervals; k is the bisection depth.
           -- Local endpoints are named (u,v) to avoid shadowing the outer
           -- (a,b).  The RoundDown/False branch uses (k < n) && ... for
           -- consistency with the 'Compact' instance (was an equivalent
           -- if-then-else-False).
           sweep [] = False
           sweep ((k,u,v):lst) = let x = return $ test_interval u v
                                 in case (r, approximate (p x) (prec r k)) of
                                      (RoundDown, False) -> (k < n) &&
                                           (let c = midpoint u v in sweep (lst ++ [(k+1,u,c), (k+1,c,v)]))
                                      (RoundDown, True) -> True
                                      (RoundUp, False) -> sweep lst
                                      (RoundUp, True) -> (k >= n) ||
                                           (let c = midpoint u v in sweep (lst ++ [(k+1,u,c), (k+1,c,v)]))
       in sweep [(0,a,b)]
       )
-- | A particular implementation of reals in terms of 'Dyadic' numbers:
-- the width of an interval is simply the difference of its endpoints.
instance IntervalDomain Dyadic where
     width Interval{lower=a, upper=b} = b - a
-- | Convenience identity that pins the type, so that e.g. @exact 1.3@ reads
-- a literal as a @'RealNum' 'Dyadic'@ instead of an ambiguous type.
exact :: RealNum Dyadic -> RealNum Dyadic
exact = id
-- | Embed an element of the approximate field @q@ into @'RealNum' q@,
-- normalizing the two endpoints with opposite rounding directions.
toReal :: IntervalDomain q => q -> RealNum q
toReal x = limit $ \s -> Interval { lower = normalize s x, upper = normalize (anti s) x }
-- | Reals form a complete space, which means that every Cauchy sequence of reals has
-- a limit. In the implementation this is manifested by the existence of an operator
-- which computes the limit of a Cauchy sequence. The error bounds for the sequence are
-- given explicitly: @x i@ yields the i-th approximation together with its error bound.
lim :: IntervalDomain q => (Int -> (RealNum q, q)) -> RealNum q
lim x =
    limit (\s ->
         let r = rounding s
             n = precision s
             -- Outer bounds of the i-th term at precision j: the term's
             -- approximation widened by its error bound (snd $ x i).
             border_lower i j = app_sub s' (lower (approximate (fst $ x i) s')) (snd $ x i)
                 where s' = prec_down j
             border_upper i j = app_add s' (upper (approximate (fst $ x i) s')) (snd $ x i)
                 where s' = prec_up j
          in case r of
            RoundDown -> Interval {lower = maximum [border_lower i n | i <- [0..n]], upper = minimum [border_upper i n | i <- [0..n]]}
            RoundUp -> Interval {lower = minimum [border_upper i n | i <- [0..n]], upper = maximum [border_lower i n | i <- [0..n]]}
       )
-- | Reals form an Archimedean field. Topologically speaking, this means that the
-- underlying approximate field @q@ is dense in the reals. Computationally this means
-- that we can compute arbitrarily good @q@-approximations to real numbers. The
-- function 'approx_to x k r' computes an approximation @a@ of type @q@ which is within
-- @2^-k@ of @x@, returning the approximation together with the precision used.
approx_to :: IntervalDomain q => RealNum q -> Int -> RoundingMode -> (q, Int)
approx_to x k r = let r10 = abs (toRational' (width (approximate x (prec r 10))))
                      r20 = abs (toRational' (width (approximate x (prec r 20))))
                      -- Heuristic starting precision extrapolated from how
                      -- fast the width shrinks between precision 10 and 20;
                      -- NOTE(review): assumes roughly geometric convergence.
                      n = case r20 == 0 of
                            True -> 20
                            False -> let a = ceiling (r10^2/r20)
                                     in (ilogb 2 a)+k
                      i = approximate x (prec r n)
                      q = 2/2^k
                  in case abs (toRational' (width i)) < q of
                       -- Width small enough: answer is the midpoint.
                       True -> (midpoint (lower i) (upper i), n)
                       -- Otherwise increase precision one step at a time.
                       False -> loop (n+1)
                                where loop m = let i = approximate x (prec r m)
                                               in case abs (toRational' (width i)) < q of
                                                    True -> (midpoint (lower i) (upper i), m)
                                                    False -> loop (m+1)
-- | Factorial over 'Rational' arguments: the product of @[1..n]@
-- (yields 1 for any @n < 1@, since the range is then empty).
fac :: Rational -> Rational
fac = product . enumFromTo 1
-- | Project a 'Rational' to its numerator.
-- NOTE: this equals the rational's value only for whole numbers.
tI :: Rational -> Integer
tI = numerator
-- | Integer logarithm: @ilogb b n@ is the floor of log base @b@ of @|n|@
-- (0 for @|n| < b@).  Works by exponential search for an upper exponent,
-- then binary search for the exact one.
ilogb :: Integer -> Integer -> Int
ilogb base n
  | n < 0     = ilogb base (negate n)
  | n < base  = 0
  | otherwise = search 1 - 1
  where
    -- Double the exponent until base^a exceeds n, then narrow.
    search a
      | n < base ^ a = narrow (quot a 2) a
      | otherwise    = search (2 * a)
    -- Binary search in (lo, hi] for the least exponent with n < base^e.
    narrow lo hi
      | hi - lo <= 1   = hi
      | n < base ^ mid = narrow lo mid
      | otherwise      = narrow mid hi
      where mid = quot (lo + hi) 2
-- | Instance floating for reals uses Taylor's series and error bounds. (Missing: Atanh, Atan, Acos, Acosh)
-- Functions (Cos, Sin, Exp, Tan, Cosh, Sinh) makes good approximations in short time for elements inside
-- the interval (-30,30) and for integers. Log is defined for elements inside (0,2) and Asinh for elements
-- inside (-1,1). For more details see http://www.diva-portal.org/smash/get/diva2:310454/FULLTEXT01.pdf
instance IntervalDomain q => Floating (RealNum q) where
   -- pi: alternating series 3 + 4*sum (-1)^(i-1)/((2i)(2i+1)(2i+2));
   -- consecutive partial sums bracket the limit, giving lower/upper bounds.
   pi = limit(\s ->
            let r = rounding s
                n = precision s
                border k r'= let k' = toRational k
                                 serie = 3 + 4 * (sum [ (-1)^((tI i)-1)/((2*i)*(2*i+1)*(2*i+2))|i <- [1..k']])
                             in app_fromRational (prec r' k) serie
            in case r of
                RoundDown -> Interval {lower = (border (2*n) RoundDown), upper = (border (2*n+1) RoundUp)}
                RoundUp -> Interval {lower = (border (2*n+1) RoundUp), upper = (border (2*n) RoundDown)}
          )
   -- exp: Taylor series truncated at u terms, with u chosen so the tail is
   -- below the target error 3/2^n; endpoints get opposite rounding.
   exp x = limit (\s->
            let r = rounding s
                n = precision s + 4
                sig = if r == RoundDown then 1 else (-1)
                q1 = toRational' (lower (approximate x (prec r 4)))
                q2 = toRational' (upper (approximate x (prec r 4)))
                m = ceiling (maximum [abs q1, abs q2])
                m' = toRational m
                v = n+1+(ilogb 2 (3^m))
                u = loop m'
                   where loop p = let m1 = 2^n*(3^m)*m'^((tI p)+1)
                                      m2 = fac (p+1)
                                  in case m2 >= m1 of
                                       True -> p
                                       False -> loop (p+1)
                serie t = sum [t^(tI i)/(fac i)|i <- [0..u]]
                k = maximum [snd (approx_to x (v+1) r), n]
                x1 = toRational' (lower (approximate x (prec r k)))
                x2 = toRational' (upper (approximate x (prec r k)))
                remainder = 3/2^n
                s' = prec r k
                part1 = app_fromRational s' ((serie x1) - sig*remainder)
                part2 = app_fromRational (anti s') ((serie x2) + sig*remainder)
            in Interval {lower = part1, upper = part2}
          )
   -- Hyperbolic sine/cosine in terms of exp.
   sinh x = ((exp x) - (exp (-x)))/2
   cosh x = ((exp x) + (exp (-x)))/2
   -- asinh: binomial series, valid for arguments inside (-1,1); outside
   -- that range the series is clamped to +/-1 (see the doc note above).
   asinh x = limit (\s->
            let r = rounding s
                n = precision s
                border h k m = let (t, r') = case m of
                                              -1 -> (toRational' (lower (approximate x (prec_down h))), RoundDown)
                                              1 -> (toRational' (upper (approximate x (prec_down h))), RoundUp)
                                   h' = toRational h
                                   (serie, remainder) = case (t >= 1,t <= -1) of
                                        (False, False) -> (sum [(-1)^(tI i)*(fac (2*i))*t^(2*(tI i)+1)/(2^(2*(tI i))*(fac i)^2*(2*i+1))|i <- [0..h']],
                                                           1/(1-(abs t))*(abs t)^(2*(tI h')+2))
                                        (True, False) -> (1,0)
                                        (False, True) -> ((-1),0)
                                   part = serie + m*remainder
                               in app_fromRational (prec r' k) part
            in case r of
                RoundDown -> Interval {lower = maximum [border i (2*n) (-1)| i <- [0,2..(2*n)]], upper = minimum [border i (2*n) 1| i <- [0,2..(2*n)]]}
                RoundUp -> Interval {lower = minimum [border i (2*n) 1| i <- [0,2..(2*n)]], upper = maximum [border i (2*n) (-1)| i <- [0,2..(2*n)]]}
          )
   -- cos: Taylor series with truncation index u chosen from the error bound.
   cos x = limit (\s->
            let r = rounding s
                n = precision s + 4
                sig = if r == RoundDown then 1 else (-1)
                q1 = toRational' (lower (approximate x (prec r 4)))
                q2 = toRational' (upper (approximate x (prec r 4)))
                m = ceiling (maximum [abs q1, abs q2])
                m' = toRational m
                v = n + (ilogb 2 (3^m))
                u = loop m'
                   where loop p = let m1 = 2^n*(3^m+1)*m'^(2*(tI p)+2)
                                      m2 = 2*(fac (2*p+2))
                                  in case m2 >= m1 of
                                       True -> p
                                       False -> loop (p+1)
                serie t = sum [(-1)^(tI i)*t^(2*(tI i))/(fac (2*i))|i <- [0..u]]
                k = maximum [snd (approx_to x (v+1) r), n]
                x1 = toRational' (lower (approximate x (prec r k)))
                x2 = toRational' (upper (approximate x (prec r k)))
                remainder = 3/2^n
                s' = prec r k
                part1 = app_fromRational s' ((serie x1) - sig*remainder)
                part2 = app_fromRational (anti s') ((serie x2) + sig*remainder)
            in Interval {lower = part1, upper = part2}
          )
   -- sin: same scheme as cos with the odd-power series.
   sin x = limit (\s->
            let r = rounding s
                n = precision s + 4
                sig = if r == RoundDown then 1 else (-1)
                q1 = toRational' (lower (approximate x (prec r 4)))
                q2 = toRational' (upper (approximate x (prec r 4)))
                m = ceiling (maximum [abs q1, abs q2])
                m' = toRational m
                v = n + (ilogb 2 (3^m+1))
                u = loop m'
                   where loop p = let m1 = 2^n*(3^m+1)*m'^(2*(tI p)+3)
                                      m2 = 2*(fac (2*p+3))
                                  in case m2 >= m1 of
                                       True -> p
                                       False -> loop (p+1)
                serie t = sum [(-1)^(tI i)*t^(2*(tI i)+1)/(fac (2*i+1))|i <- [0..u]]
                k = maximum [snd (approx_to x (v+1) r), n]
                x1 = toRational' (lower (approximate x (prec r k)))
                x2 = toRational' (upper (approximate x (prec r k)))
                remainder = 3/2^n
                s' = prec r k
                part1 = app_fromRational s' ((serie x1) - sig*remainder)
                part2 = app_fromRational (anti s') ((serie x2) + sig*remainder)
            in Interval {lower = part1, upper = part2}
          )
   -- atanh via logarithms.
   atanh x = (log (1+x) - log (1-x))/2
   -- log: Mercator series around 1, split on x <=> 1 for the remainder
   -- estimate; only defined for x in (0,2) (errors otherwise).
   log x = let b = compare x 0
           in case b of
               LT -> error "Not defined"
               GT -> let b' = compare x 2
                     in case b' of
                         LT -> let b'' = compare x 1
                               in case b'' of
                                   LT -> limit (\s->
                                            let r = rounding s
                                                n = precision s + 1
                                                border h k m = let (t,r') = case m of
                                                                             -1 -> (toRational' (lower (approximate x (prec RoundDown h))),RoundDown)
                                                                             1 -> (toRational' (upper (approximate x (prec RoundDown h))),RoundUp)
                                                                   h' = toRational h
                                                                   serie = -sum [(-1)^(tI i)*(-1+t)^(tI i)/i|i <- [1..h']]
                                                                   remainder = 3^(ceiling (abs t))/2*(-1+t)^(tI h'+1)/(h'+1)
                                                                   part = serie + m*remainder
                                                               in app_fromRational (prec r' k) part
                                            in case r of
                                                RoundDown -> Interval {lower = maximum [border i n (-1)| i <- [1..n]], upper = minimum [border i n 1| i <- [1..n]]}
                                                RoundUp -> Interval {lower = minimum [border i n 1| i <- [1..n]], upper = maximum [border i n (-1)| i <- [1..n]]}
                                          )
                                   GT -> limit (\s->
                                            let r = rounding s
                                                n = precision s + 1
                                                border h k m = let (t,r') = case m of
                                                                             -1 -> (toRational' (lower (approximate x (prec RoundDown h))),RoundDown)
                                                                             1 -> (toRational' (upper (approximate x (prec RoundDown h))),RoundUp)
                                                                   h' = toRational h
                                                                   serie = -sum [(-1)^(tI i)*(-1+t)^(tI i)/i|i <- [1..h']]
                                                                   remainder = (-1+t)^(tI h'+1)/(h'+1)
                                                                   part = serie + m*remainder
                                                               in app_fromRational (prec r' k) part
                                            in case r of
                                                RoundDown -> Interval {lower = maximum [border i n (-1)| i <- [1..n]], upper = minimum [border i n 1| i <- [1..n]]}
                                                RoundUp -> Interval {lower = minimum [border i n 1| i <- [1..n]], upper = maximum [border i n (-1)| i <- [1..n]]}
                                          )
GT -> error "Not defined" | aljosaVodopija/eksaktnaRealna | Reals.hs | gpl-3.0 | 19,988 | 11 | 44 | 9,552 | 6,545 | 3,417 | 3,128 | 264 | 4 |
{-# LANGUAGE ViewPatterns #-}
-- | Different kernels
module MLLabs.Kernel
( Kernel
, gaussKernel
, triangularKernel
, rectangularKernel
, quarticKernel
) where
import Data.Bool (bool)
-- | A kernel is a function from a (scaled) distance to a weight.
type Kernel = Double -> Double
-- | Gaussian kernel with standard deviation σ, centered at 0
-- (the original's "suppose b = 0").
gaussKernel :: Double -> Kernel
gaussKernel σ = \x ->
  let coeff = 1 / (σ * sqrt (2 * pi))
      expo  = - 1 / (2 * σ**2) * x**2
  in coeff * exp expo
-- | Triangular kernel: weight falls off linearly with |x|, zero outside [-1, 1].
triangularKernel :: Kernel
triangularKernel t
  | d <= 1    = 1 - d
  | otherwise = 0
  where d = abs t
-- | Rectangular (uniform) kernel: constant weight 1 on [-1, 1], zero outside.
rectangularKernel :: Kernel
rectangularKernel t = if abs t <= 1 then 1 else 0
-- | Quartic (biweight) kernel: @15\/16 * (1 - u^2)^2@ on [-1, 1], zero outside.
quarticKernel :: Kernel
quarticKernel u
  | abs u <= 1 = 15/16*(1-u**2)**2
  | otherwise  = 0
-- source: zhenyavinogradov/ml-labs | src/MLLabs/Kernel.hs | gpl-3.0
module MatchRegexp (matchHere, match) where
import Regexp(Regexp(Literal,
AnyChar,
Or,
OneOrMore,
ZeroOrMore,
Sequence,
Optional,
AtEnd,
AtStart))
import Data.List(nub)
-- | All de-duplicated matches of the pattern anywhere in the input.
match :: Regexp -> String -> [String]
match regexp = nub . matchRegexp regexp
-- | Try the pattern at every suffix of the input, collecting the matched
-- prefixes.  An 'AtStart' anchor is handled here: it is tried only against
-- the whole input, never against later suffixes.
-- NOTE(review): the empty-input clause means nothing matches "" — not even
-- patterns that can match the empty string; confirm this is intended.
matchRegexp :: Regexp -> String -> [String]
matchRegexp _ [] = []
matchRegexp (AtStart r) text = map fst $ matchHere r text
matchRegexp r text@(_:xs)
  = map fst (matchHere r text) ++ matchRegexp r xs
-- | Match a pattern at the start of the input.  Each result is a pair of
-- (text consumed by the match, remaining input).  An empty result list
-- means no match at this position.
-- NOTE(review): the first clause rejects all empty input, so 'ZeroOrMore',
-- 'Optional' and 'AtEnd' cannot match at the very end of a string — this
-- looks like a bug; confirm against the intended semantics.
matchHere :: Regexp -> String -> [(String, String)]
matchHere _ [] = []
-- Literal: succeeds iff the text starts with the literal.
matchHere (Literal r) text
  | take (length r) text == r = [(r, drop (length r) text)]
  | otherwise = []
-- AnyChar: consumes exactly one character.
matchHere AnyChar (t:ts)
  = [([t], ts)]
-- Or: all matches of either alternative.
matchHere (Or r1 r2) text
  = matchHere r1 text ++ matchHere r2 text
-- OneOrMore: one match plus every extension found by 'getMoreMatches'.
matchHere (OneOrMore r) text
  | null matched = []
  | otherwise = matched ++ getMoreMatches r matched
  where matched = matchHere r text
-- ZeroOrMore: the empty match is always an option.
matchHere (ZeroOrMore r) text =
  ("", text):matchHere (OneOrMore r) text
-- Sequence: every second-part continuation of every first-part match.
matchHere (Sequence first second) text
  | null firstMatches = []
  | otherwise = getSecondMatches firstMatches
  where firstMatches = matchHere first text
        getSecondMatches [] = []
        getSecondMatches ((matched, remainder):ms)
          | null $ matchHere second remainder = []
          | otherwise = knit matched (matchHere second remainder) ++
                        getSecondMatches ms
-- Optional: fall back to the empty match when nothing matches.
matchHere (Optional regexp) text
  | null matches = [("", text)]
  | otherwise = matches
  where matches = matchHere regexp text
-- AtEnd: keep only matches that consume the whole input.
matchHere (AtEnd regexp) text =
  filter (null . snd) $ matchHere regexp text
-- AtStart is handled in 'matchRegexp'; reaching it here is a misuse.
-- NOTE(review): the bound 'regexp' is unused (GHC warning); '_' would do.
matchHere (AtStart regexp) _ =
  error "Misuse of ^"
-- | Given partial matches @(matchedSoFar, remainder)@, repeatedly extend
-- each one with further matches of the pattern, collecting every extension
-- (so overlapping repetitions are all produced).
getMoreMatches :: Regexp -> [(String, String)] -> [(String, String)]
getMoreMatches _ [] = []
getMoreMatches regexp ((matched, remainder):mms)
  | null extended = getMoreMatches regexp mms
  | otherwise     = extended ++ getMoreMatches regexp (extended ++ mms)
  where
    -- Matches continuing right after what was already consumed; bound once
    -- here (the original recomputed `knit matched (matchHere ...)` three
    -- times per step).
    extended = knit matched (matchHere regexp remainder)
-- | Prepend an already-matched prefix to the matched part of each
-- continuation, leaving the remainders untouched.  (The original
-- hand-rolled recursion is just 'map'.)
knit :: String -> [(String, String)] -> [(String, String)]
knit matched = map (\(a, b) -> (matched ++ a, b))
-- source: srank/regexp | MatchRegexp.hs | gpl-3.0
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Genomics.Reads.Search
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Gets a list of reads for one or more read group sets. For the
-- definitions of read group sets and other genomics resources, see
-- [Fundamentals of Google
-- Genomics](https:\/\/cloud.google.com\/genomics\/fundamentals-of-google-genomics)
-- Reads search operates over a genomic coordinate space of reference
-- sequence & position defined over the reference sequences to which the
-- requested read group sets are aligned. If a target positional range is
-- specified, search returns all reads whose alignment to the reference
-- genome overlap the range. A query which specifies only read group set
-- IDs yields all reads in those read group sets, including unmapped reads.
-- All reads returned (including reads on subsequent pages) are ordered by
-- genomic coordinate (by reference sequence, then position). Reads with
-- equivalent genomic coordinates are returned in an unspecified order.
-- This order is consistent, such that two queries for the same content
-- (regardless of page size) yield reads in the same order across their
-- respective streams of paginated responses. Implements
-- [GlobalAllianceApi.searchReads](https:\/\/github.com\/ga4gh\/schemas\/blob\/v0.5.1\/src\/main\/resources\/avro\/readmethods.avdl#L85).
--
-- /See:/ <https://cloud.google.com/genomics Genomics API Reference> for @genomics.reads.search@.
module Network.Google.Resource.Genomics.Reads.Search
(
-- * REST Resource
ReadsSearchResource
-- * Creating a Request
, readsSearch
, ReadsSearch
-- * Request Lenses
, reaXgafv
, reaUploadProtocol
, reaPp
, reaAccessToken
, reaUploadType
, reaPayload
, reaBearerToken
, reaCallback
) where
import Network.Google.Genomics.Types
import Network.Google.Prelude
-- | A resource alias for @genomics.reads.search@ method which the
-- 'ReadsSearch' request conforms to.
--
-- Encodes a @POST \/v1\/reads\/search@ call: the query parameters below are
-- the standard Google API ones, the body is a JSON 'SearchReadsRequest' and
-- the response a JSON 'SearchReadsResponse'.
type ReadsSearchResource =
     "v1" :>
       "reads" :>
         "search" :>
           QueryParam "$.xgafv" Xgafv :>
             QueryParam "upload_protocol" Text :>
               QueryParam "pp" Bool :>
                 QueryParam "access_token" Text :>
                   QueryParam "uploadType" Text :>
                     QueryParam "bearer_token" Text :>
                       QueryParam "callback" Text :>
                         QueryParam "alt" AltJSON :>
                           ReqBody '[JSON] SearchReadsRequest :>
                             Post '[JSON] SearchReadsResponse
-- | Gets a list of reads for one or more read group sets. For the
-- definitions of read group sets and other genomics resources, see
-- [Fundamentals of Google
-- Genomics](https:\/\/cloud.google.com\/genomics\/fundamentals-of-google-genomics)
-- Reads search operates over a genomic coordinate space of reference
-- sequence & position defined over the reference sequences to which the
-- requested read group sets are aligned. If a target positional range is
-- specified, search returns all reads whose alignment to the reference
-- genome overlap the range. A query which specifies only read group set
-- IDs yields all reads in those read group sets, including unmapped reads.
-- All reads returned (including reads on subsequent pages) are ordered by
-- genomic coordinate (by reference sequence, then position). Reads with
-- equivalent genomic coordinates are returned in an unspecified order.
-- This order is consistent, such that two queries for the same content
-- (regardless of page size) yield reads in the same order across their
-- respective streams of paginated responses. Implements
-- [GlobalAllianceApi.searchReads](https:\/\/github.com\/ga4gh\/schemas\/blob\/v0.5.1\/src\/main\/resources\/avro\/readmethods.avdl#L85).
--
-- | Holds every parameter of a @genomics.reads.search@ call.
--
-- /See:/ 'readsSearch' smart constructor.
data ReadsSearch = ReadsSearch'
    { _reaXgafv :: !(Maybe Xgafv) -- ^ V1 error format.
    , _reaUploadProtocol :: !(Maybe Text) -- ^ Upload protocol for media.
    , _reaPp :: !Bool -- ^ Pretty-print response.
    , _reaAccessToken :: !(Maybe Text) -- ^ OAuth access token.
    , _reaUploadType :: !(Maybe Text) -- ^ Legacy upload protocol for media.
    , _reaPayload :: !SearchReadsRequest -- ^ Multipart request metadata.
    , _reaBearerToken :: !(Maybe Text) -- ^ OAuth bearer token.
    , _reaCallback :: !(Maybe Text) -- ^ JSONP callback.
    } deriving (Eq,Show,Data,Typeable,Generic)
-- | Creates a value of 'ReadsSearch' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'reaXgafv'
--
-- * 'reaUploadProtocol'
--
-- * 'reaPp'
--
-- * 'reaAccessToken'
--
-- * 'reaUploadType'
--
-- * 'reaPayload'
--
-- * 'reaBearerToken'
--
-- * 'reaCallback'
-- | Build a 'ReadsSearch' from just the request body; all optional query
-- parameters start as 'Nothing', and pretty-printing ('_reaPp') defaults
-- to 'True'.
readsSearch
    :: SearchReadsRequest -- ^ 'reaPayload'
    -> ReadsSearch
readsSearch pReaPayload_ =
    ReadsSearch'
    { _reaXgafv = Nothing
    , _reaUploadProtocol = Nothing
    , _reaPp = True
    , _reaAccessToken = Nothing
    , _reaUploadType = Nothing
    , _reaPayload = pReaPayload_
    , _reaBearerToken = Nothing
    , _reaCallback = Nothing
    }
-- Lenses over the request's fields, one per query parameter; each simply
-- pairs a getter for the field with a record-update setter.
-- | V1 error format.
reaXgafv :: Lens' ReadsSearch (Maybe Xgafv)
reaXgafv = lens _reaXgafv (\ s a -> s{_reaXgafv = a})
-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
reaUploadProtocol :: Lens' ReadsSearch (Maybe Text)
reaUploadProtocol
  = lens _reaUploadProtocol
      (\ s a -> s{_reaUploadProtocol = a})
-- | Pretty-print response.
reaPp :: Lens' ReadsSearch Bool
reaPp = lens _reaPp (\ s a -> s{_reaPp = a})
-- | OAuth access token.
reaAccessToken :: Lens' ReadsSearch (Maybe Text)
reaAccessToken
  = lens _reaAccessToken
      (\ s a -> s{_reaAccessToken = a})
-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
reaUploadType :: Lens' ReadsSearch (Maybe Text)
reaUploadType
  = lens _reaUploadType
      (\ s a -> s{_reaUploadType = a})
-- | Multipart request metadata.
reaPayload :: Lens' ReadsSearch SearchReadsRequest
reaPayload
  = lens _reaPayload (\ s a -> s{_reaPayload = a})
-- | OAuth bearer token.
reaBearerToken :: Lens' ReadsSearch (Maybe Text)
reaBearerToken
  = lens _reaBearerToken
      (\ s a -> s{_reaBearerToken = a})
-- | JSONP
reaCallback :: Lens' ReadsSearch (Maybe Text)
reaCallback
  = lens _reaCallback (\ s a -> s{_reaCallback = a})
-- | Wires 'ReadsSearch' to the generated servant client: response type,
-- accepted OAuth scopes, and the mapping of record fields onto the
-- 'ReadsSearchResource' query parameters / body.
instance GoogleRequest ReadsSearch where
        type Rs ReadsSearch = SearchReadsResponse
        type Scopes ReadsSearch =
             '["https://www.googleapis.com/auth/cloud-platform",
               "https://www.googleapis.com/auth/genomics",
               "https://www.googleapis.com/auth/genomics.readonly"]
        requestClient ReadsSearch'{..}
          = go _reaXgafv _reaUploadProtocol (Just _reaPp)
              _reaAccessToken
              _reaUploadType
              _reaBearerToken
              _reaCallback
              (Just AltJSON)
              _reaPayload
              genomicsService
          where go
                  = buildClient (Proxy :: Proxy ReadsSearchResource)
                      mempty
-- source: rueshyna/gogol | gogol-genomics/gen/Network/Google/Resource/Genomics/Reads/Search.hs | mpl-2.0
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.SecurityCenter.Types.Sum
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
module Network.Google.SecurityCenter.Types.Sum where
import Network.Google.Prelude hiding (Bytes)
-- | State change of the finding between the points in time.
data ListFindingsResultStateChange
    = Unused
      -- ^ @UNUSED@
      -- State change is unused, this is the canonical default for this enum.
    | Changed
      -- ^ @CHANGED@
      -- The finding has changed state in some way between the points in time and
      -- existed at both points.
    | Unchanged
      -- ^ @UNCHANGED@
      -- The finding has not changed state between the points in time and existed
      -- at both points.
    | Added
      -- ^ @ADDED@
      -- The finding was created between the points in time.
    | Removed
      -- ^ @REMOVED@
      -- The finding at timestamp does not match the filter specified, but it did
      -- at timestamp - compare_duration.
      deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable ListFindingsResultStateChange

-- Wire encoding: 'parseQueryParam' and 'toQueryParam' round-trip the
-- upper-case names above; JSON reuses the same textual form.
instance FromHttpApiData ListFindingsResultStateChange where
    parseQueryParam = \case
        "UNUSED" -> Right Unused
        "CHANGED" -> Right Changed
        "UNCHANGED" -> Right Unchanged
        "ADDED" -> Right Added
        "REMOVED" -> Right Removed
        x -> Left ("Unable to parse ListFindingsResultStateChange from: " <> x)

instance ToHttpApiData ListFindingsResultStateChange where
    toQueryParam = \case
        Unused -> "UNUSED"
        Changed -> "CHANGED"
        Unchanged -> "UNCHANGED"
        Added -> "ADDED"
        Removed -> "REMOVED"

instance FromJSON ListFindingsResultStateChange where
    parseJSON = parseJSONText "ListFindingsResultStateChange"

instance ToJSON ListFindingsResultStateChange where
    toJSON = toJSONText
-- | The state of the finding.
data GoogleCloudSecuritycenterV1p1beta1FindingState
    = StateUnspecified
      -- ^ @STATE_UNSPECIFIED@
      -- Unspecified state.
    | Active
      -- ^ @ACTIVE@
      -- The finding requires attention and has not been addressed yet.
    | Inactive
      -- ^ @INACTIVE@
      -- The finding has been fixed, triaged as a non-issue or otherwise
      -- addressed and is no longer active.
      deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable GoogleCloudSecuritycenterV1p1beta1FindingState

-- Wire encoding round-trips the upper-case names above.
instance FromHttpApiData GoogleCloudSecuritycenterV1p1beta1FindingState where
    parseQueryParam = \case
        "STATE_UNSPECIFIED" -> Right StateUnspecified
        "ACTIVE" -> Right Active
        "INACTIVE" -> Right Inactive
        x -> Left ("Unable to parse GoogleCloudSecuritycenterV1p1beta1FindingState from: " <> x)

instance ToHttpApiData GoogleCloudSecuritycenterV1p1beta1FindingState where
    toQueryParam = \case
        StateUnspecified -> "STATE_UNSPECIFIED"
        Active -> "ACTIVE"
        Inactive -> "INACTIVE"

instance FromJSON GoogleCloudSecuritycenterV1p1beta1FindingState where
    parseJSON = parseJSONText "GoogleCloudSecuritycenterV1p1beta1FindingState"

instance ToJSON GoogleCloudSecuritycenterV1p1beta1FindingState where
    toJSON = toJSONText
-- | Represents if the asset was created\/updated\/deleted.
data GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType
    = ChangeTypeUnspecified
      -- ^ @CHANGE_TYPE_UNSPECIFIED@
      -- Unspecified or default.
    | Created
      -- ^ @CREATED@
      -- Newly created Asset
    | Updated
      -- ^ @UPDATED@
      -- Asset was updated.
    | Deleted
      -- ^ @DELETED@
      -- Asset was deleted.
      deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType

-- Wire encoding round-trips the upper-case names above.
instance FromHttpApiData GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType where
    parseQueryParam = \case
        "CHANGE_TYPE_UNSPECIFIED" -> Right ChangeTypeUnspecified
        "CREATED" -> Right Created
        "UPDATED" -> Right Updated
        "DELETED" -> Right Deleted
        x -> Left ("Unable to parse GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType from: " <> x)

instance ToHttpApiData GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType where
    toQueryParam = \case
        ChangeTypeUnspecified -> "CHANGE_TYPE_UNSPECIFIED"
        Created -> "CREATED"
        Updated -> "UPDATED"
        Deleted -> "DELETED"

instance FromJSON GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType where
    parseJSON = parseJSONText "GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType"

instance ToJSON GoogleCloudSecuritycenterV1p1beta1TemporalAssetChangeType where
    toJSON = toJSONText
-- | The mode to use for filtering asset discovery.
data AssetDiscoveryConfigInclusionMode
    = InclusionModeUnspecified
      -- ^ @INCLUSION_MODE_UNSPECIFIED@
      -- Unspecified. Setting the mode with this value will disable
      -- inclusion\/exclusion filtering for Asset Discovery.
    | IncludeOnly
      -- ^ @INCLUDE_ONLY@
      -- Asset Discovery will capture only the resources within the projects
      -- specified. All other resources will be ignored.
    | Exclude
      -- ^ @EXCLUDE@
      -- Asset Discovery will ignore all resources under the projects specified.
      -- All other resources will be retrieved.
      deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable AssetDiscoveryConfigInclusionMode

-- Wire encoding round-trips the upper-case names above.
instance FromHttpApiData AssetDiscoveryConfigInclusionMode where
    parseQueryParam = \case
        "INCLUSION_MODE_UNSPECIFIED" -> Right InclusionModeUnspecified
        "INCLUDE_ONLY" -> Right IncludeOnly
        "EXCLUDE" -> Right Exclude
        x -> Left ("Unable to parse AssetDiscoveryConfigInclusionMode from: " <> x)

instance ToHttpApiData AssetDiscoveryConfigInclusionMode where
    toQueryParam = \case
        InclusionModeUnspecified -> "INCLUSION_MODE_UNSPECIFIED"
        IncludeOnly -> "INCLUDE_ONLY"
        Exclude -> "EXCLUDE"

instance FromJSON AssetDiscoveryConfigInclusionMode where
    parseJSON = parseJSONText "AssetDiscoveryConfigInclusionMode"

instance ToJSON AssetDiscoveryConfigInclusionMode where
    toJSON = toJSONText
-- | The type of events the config is for, e.g. FINDING.
data NotificationConfigEventType
    = NCETEventTypeUnspecified
      -- ^ @EVENT_TYPE_UNSPECIFIED@
      -- Unspecified event type.
    | NCETFinding
      -- ^ @FINDING@
      -- Events for findings.
      deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable NotificationConfigEventType

-- Wire encoding round-trips the upper-case names above.
instance FromHttpApiData NotificationConfigEventType where
    parseQueryParam = \case
        "EVENT_TYPE_UNSPECIFIED" -> Right NCETEventTypeUnspecified
        "FINDING" -> Right NCETFinding
        x -> Left ("Unable to parse NotificationConfigEventType from: " <> x)

instance ToHttpApiData NotificationConfigEventType where
    toQueryParam = \case
        NCETEventTypeUnspecified -> "EVENT_TYPE_UNSPECIFIED"
        NCETFinding -> "FINDING"

instance FromJSON NotificationConfigEventType where
    parseJSON = parseJSONText "NotificationConfigEventType"

instance ToJSON NotificationConfigEventType where
    toJSON = toJSONText
-- | The severity of the finding.
data GoogleCloudSecuritycenterV1p1beta1FindingSeverity
    = SeverityUnspecified
      -- ^ @SEVERITY_UNSPECIFIED@
      -- No severity specified. The default value.
    | Critical
      -- ^ @CRITICAL@
      -- Critical severity.
    | High
      -- ^ @HIGH@
      -- High severity.
    | Medium
      -- ^ @MEDIUM@
      -- Medium severity.
    | Low
      -- ^ @LOW@
      -- Low severity.
      deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable GoogleCloudSecuritycenterV1p1beta1FindingSeverity

-- Wire encoding round-trips the upper-case names above.
instance FromHttpApiData GoogleCloudSecuritycenterV1p1beta1FindingSeverity where
    parseQueryParam = \case
        "SEVERITY_UNSPECIFIED" -> Right SeverityUnspecified
        "CRITICAL" -> Right Critical
        "HIGH" -> Right High
        "MEDIUM" -> Right Medium
        "LOW" -> Right Low
        x -> Left ("Unable to parse GoogleCloudSecuritycenterV1p1beta1FindingSeverity from: " <> x)

instance ToHttpApiData GoogleCloudSecuritycenterV1p1beta1FindingSeverity where
    toQueryParam = \case
        SeverityUnspecified -> "SEVERITY_UNSPECIFIED"
        Critical -> "CRITICAL"
        High -> "HIGH"
        Medium -> "MEDIUM"
        Low -> "LOW"

instance FromJSON GoogleCloudSecuritycenterV1p1beta1FindingSeverity where
    parseJSON = parseJSONText "GoogleCloudSecuritycenterV1p1beta1FindingSeverity"

instance ToJSON GoogleCloudSecuritycenterV1p1beta1FindingSeverity where
    toJSON = toJSONText
-- | Required. The desired State of the finding.
data SetFindingStateRequestState
  = SFSRSStateUnspecified
    -- ^ @STATE_UNSPECIFIED@
    -- Unspecified state.
  | SFSRSActive
    -- ^ @ACTIVE@
    -- The finding requires attention and has not been addressed yet.
  | SFSRSInactive
    -- ^ @INACTIVE@
    -- The finding has been fixed, triaged as a non-issue or otherwise
    -- addressed and is no longer active.
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable SetFindingStateRequestState

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData SetFindingStateRequestState where
  parseQueryParam t = case t of
    "STATE_UNSPECIFIED" -> Right SFSRSStateUnspecified
    "ACTIVE" -> Right SFSRSActive
    "INACTIVE" -> Right SFSRSInactive
    other -> Left ("Unable to parse SetFindingStateRequestState from: " <> other)

instance ToHttpApiData SetFindingStateRequestState where
  toQueryParam v = case v of
    SFSRSStateUnspecified -> "STATE_UNSPECIFIED"
    SFSRSActive -> "ACTIVE"
    SFSRSInactive -> "INACTIVE"

instance FromJSON SetFindingStateRequestState where
  parseJSON = parseJSONText "SetFindingStateRequestState"

instance ToJSON SetFindingStateRequestState where
  toJSON = toJSONText
-- | State change of the asset between the points in time.
data ListAssetsResultStateChange
  = LARSCUnused
    -- ^ @UNUSED@
    -- State change is unused, this is the canonical default for this enum.
  | LARSCAdded
    -- ^ @ADDED@
    -- Asset was added between the points in time.
  | LARSCRemoved
    -- ^ @REMOVED@
    -- Asset was removed between the points in time.
  | LARSCActive
    -- ^ @ACTIVE@
    -- Asset was present at both point(s) in time.
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable ListAssetsResultStateChange

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData ListAssetsResultStateChange where
  parseQueryParam t = case t of
    "UNUSED" -> Right LARSCUnused
    "ADDED" -> Right LARSCAdded
    "REMOVED" -> Right LARSCRemoved
    "ACTIVE" -> Right LARSCActive
    other -> Left ("Unable to parse ListAssetsResultStateChange from: " <> other)

instance ToHttpApiData ListAssetsResultStateChange where
  toQueryParam v = case v of
    LARSCUnused -> "UNUSED"
    LARSCAdded -> "ADDED"
    LARSCRemoved -> "REMOVED"
    LARSCActive -> "ACTIVE"

instance FromJSON ListAssetsResultStateChange where
  parseJSON = parseJSONText "ListAssetsResultStateChange"

instance ToJSON ListAssetsResultStateChange where
  toJSON = toJSONText
-- | The state of an asset discovery run.
data GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState
  = GCSVRADRSStateUnspecified
    -- ^ @STATE_UNSPECIFIED@
    -- Asset discovery run state was unspecified.
  | GCSVRADRSCompleted
    -- ^ @COMPLETED@
    -- Asset discovery run completed successfully.
  | GCSVRADRSSuperseded
    -- ^ @SUPERSEDED@
    -- Asset discovery run was cancelled with tasks still pending, as another
    -- run for the same organization was started with a higher priority.
  | GCSVRADRSTerminated
    -- ^ @TERMINATED@
    -- Asset discovery run was killed and terminated.
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState where
  parseQueryParam t = case t of
    "STATE_UNSPECIFIED" -> Right GCSVRADRSStateUnspecified
    "COMPLETED" -> Right GCSVRADRSCompleted
    "SUPERSEDED" -> Right GCSVRADRSSuperseded
    "TERMINATED" -> Right GCSVRADRSTerminated
    other -> Left ("Unable to parse GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState from: " <> other)

instance ToHttpApiData GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState where
  toQueryParam v = case v of
    GCSVRADRSStateUnspecified -> "STATE_UNSPECIFIED"
    GCSVRADRSCompleted -> "COMPLETED"
    GCSVRADRSSuperseded -> "SUPERSEDED"
    GCSVRADRSTerminated -> "TERMINATED"

instance FromJSON GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState where
  parseJSON = parseJSONText "GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState"

instance ToJSON GoogleCloudSecuritycenterV1p1beta1RunAssetDiscoveryResponseState where
  toJSON = toJSONText
-- | The log type that this config enables.
data AuditLogConfigLogType
  = LogTypeUnspecified
    -- ^ @LOG_TYPE_UNSPECIFIED@
    -- Default case. Should never be this.
  | AdminRead
    -- ^ @ADMIN_READ@
    -- Admin reads. Example: CloudIAM getIamPolicy
  | DataWrite
    -- ^ @DATA_WRITE@
    -- Data writes. Example: CloudSQL Users create
  | DataRead
    -- ^ @DATA_READ@
    -- Data reads. Example: CloudSQL Users list
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable AuditLogConfigLogType

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData AuditLogConfigLogType where
  parseQueryParam t = case t of
    "LOG_TYPE_UNSPECIFIED" -> Right LogTypeUnspecified
    "ADMIN_READ" -> Right AdminRead
    "DATA_WRITE" -> Right DataWrite
    "DATA_READ" -> Right DataRead
    other -> Left ("Unable to parse AuditLogConfigLogType from: " <> other)

instance ToHttpApiData AuditLogConfigLogType where
  toQueryParam v = case v of
    LogTypeUnspecified -> "LOG_TYPE_UNSPECIFIED"
    AdminRead -> "ADMIN_READ"
    DataWrite -> "DATA_WRITE"
    DataRead -> "DATA_READ"

instance FromJSON AuditLogConfigLogType where
  parseJSON = parseJSONText "AuditLogConfigLogType"

instance ToJSON AuditLogConfigLogType where
  toJSON = toJSONText
-- | The state of the finding.
data FindingState
  = FSStateUnspecified
    -- ^ @STATE_UNSPECIFIED@
    -- Unspecified state.
  | FSActive
    -- ^ @ACTIVE@
    -- The finding requires attention and has not been addressed yet.
  | FSInactive
    -- ^ @INACTIVE@
    -- The finding has been fixed, triaged as a non-issue or otherwise
    -- addressed and is no longer active.
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable FindingState

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData FindingState where
  parseQueryParam t = case t of
    "STATE_UNSPECIFIED" -> Right FSStateUnspecified
    "ACTIVE" -> Right FSActive
    "INACTIVE" -> Right FSInactive
    other -> Left ("Unable to parse FindingState from: " <> other)

instance ToHttpApiData FindingState where
  toQueryParam v = case v of
    FSStateUnspecified -> "STATE_UNSPECIFIED"
    FSActive -> "ACTIVE"
    FSInactive -> "INACTIVE"

instance FromJSON FindingState where
  parseJSON = parseJSONText "FindingState"

instance ToJSON FindingState where
  toJSON = toJSONText
-- | V1 error format.
data Xgafv
  = X1
    -- ^ @1@
    -- v1 error format
  | X2
    -- ^ @2@
    -- v2 error format
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable Xgafv

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData Xgafv where
  parseQueryParam t = case t of
    "1" -> Right X1
    "2" -> Right X2
    other -> Left ("Unable to parse Xgafv from: " <> other)

instance ToHttpApiData Xgafv where
  toQueryParam v = case v of
    X1 -> "1"
    X2 -> "2"

instance FromJSON Xgafv where
  parseJSON = parseJSONText "Xgafv"

instance ToJSON Xgafv where
  toJSON = toJSONText
-- | The state of an asset discovery run.
data GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState
  = GStateUnspecified
    -- ^ @STATE_UNSPECIFIED@
    -- Asset discovery run state was unspecified.
  | GCompleted
    -- ^ @COMPLETED@
    -- Asset discovery run completed successfully.
  | GSuperseded
    -- ^ @SUPERSEDED@
    -- Asset discovery run was cancelled with tasks still pending, as another
    -- run for the same organization was started with a higher priority.
  | GTerminated
    -- ^ @TERMINATED@
    -- Asset discovery run was killed and terminated.
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState where
  parseQueryParam t = case t of
    "STATE_UNSPECIFIED" -> Right GStateUnspecified
    "COMPLETED" -> Right GCompleted
    "SUPERSEDED" -> Right GSuperseded
    "TERMINATED" -> Right GTerminated
    other -> Left ("Unable to parse GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState from: " <> other)

instance ToHttpApiData GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState where
  toQueryParam v = case v of
    GStateUnspecified -> "STATE_UNSPECIFIED"
    GCompleted -> "COMPLETED"
    GSuperseded -> "SUPERSEDED"
    GTerminated -> "TERMINATED"

instance FromJSON GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState where
  parseJSON = parseJSONText "GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState"

instance ToJSON GoogleCloudSecuritycenterV1RunAssetDiscoveryResponseState where
  toJSON = toJSONText
-- | The state of an asset discovery run.
data GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState
  = GOOStateUnspecified
    -- ^ @STATE_UNSPECIFIED@
    -- Asset discovery run state was unspecified.
  | GOOCompleted
    -- ^ @COMPLETED@
    -- Asset discovery run completed successfully.
  | GOOSuperseded
    -- ^ @SUPERSEDED@
    -- Asset discovery run was cancelled with tasks still pending, as another
    -- run for the same organization was started with a higher priority.
  | GOOTerminated
    -- ^ @TERMINATED@
    -- Asset discovery run was killed and terminated.
  deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)

instance Hashable GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState

-- Decode the wire string; anything unrecognised is an error.
instance FromHttpApiData GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState where
  parseQueryParam t = case t of
    "STATE_UNSPECIFIED" -> Right GOOStateUnspecified
    "COMPLETED" -> Right GOOCompleted
    "SUPERSEDED" -> Right GOOSuperseded
    "TERMINATED" -> Right GOOTerminated
    other -> Left ("Unable to parse GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState from: " <> other)

instance ToHttpApiData GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState where
  toQueryParam v = case v of
    GOOStateUnspecified -> "STATE_UNSPECIFIED"
    GOOCompleted -> "COMPLETED"
    GOOSuperseded -> "SUPERSEDED"
    GOOTerminated -> "TERMINATED"

instance FromJSON GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState where
  parseJSON = parseJSONText "GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState"

instance ToJSON GoogleCloudSecuritycenterV1beta1RunAssetDiscoveryResponseState where
  toJSON = toJSONText
| brendanhay/gogol | gogol-securitycenter/gen/Network/Google/SecurityCenter/Types/Sum.hs | mpl-2.0 | 19,946 | 0 | 11 | 4,243 | 2,755 | 1,473 | 1,282 | 326 | 0 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.BinaryAuthorization.Projects.Attestors.TestIAMPermissions
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Returns permissions that a caller has on the specified resource. If the
-- resource does not exist, this will return an empty set of permissions,
-- not a \`NOT_FOUND\` error. Note: This operation is designed to be used
-- for building permission-aware UIs and command-line tools, not for
-- authorization checking. This operation may \"fail open\" without
-- warning.
--
-- /See:/ <https://cloud.google.com/binary-authorization/ Binary Authorization API Reference> for @binaryauthorization.projects.attestors.testIamPermissions@.
module Network.Google.Resource.BinaryAuthorization.Projects.Attestors.TestIAMPermissions
(
-- * REST Resource
ProjectsAttestorsTestIAMPermissionsResource
-- * Creating a Request
, projectsAttestorsTestIAMPermissions
, ProjectsAttestorsTestIAMPermissions
-- * Request Lenses
, patipXgafv
, patipUploadProtocol
, patipAccessToken
, patipUploadType
, patipPayload
, patipResource
, patipCallback
) where
import Network.Google.BinaryAuthorization.Types
import Network.Google.Prelude
-- | A resource alias for @binaryauthorization.projects.attestors.testIamPermissions@ method which the
-- 'ProjectsAttestorsTestIAMPermissions' request conforms to.
--
-- Each @:>@ segment is one piece of the request: the literal @v1@ path
-- prefix, the captured @resource@ path (in @testIamPermissions@ capture
-- mode), the standard query parameters, the JSON request body, and the
-- response type of the POST.
type ProjectsAttestorsTestIAMPermissionsResource =
     "v1" :>
       CaptureMode "resource" "testIamPermissions" Text :>
         QueryParam "$.xgafv" Xgafv :>
           QueryParam "upload_protocol" Text :>
             QueryParam "access_token" Text :>
               QueryParam "uploadType" Text :>
                 QueryParam "callback" Text :>
                   QueryParam "alt" AltJSON :>
                     ReqBody '[JSON] TestIAMPermissionsRequest :>
                       Post '[JSON] TestIAMPermissionsResponse
-- | Returns permissions that a caller has on the specified resource. If the
-- resource does not exist, this will return an empty set of permissions,
-- not a \`NOT_FOUND\` error. Note: This operation is designed to be used
-- for building permission-aware UIs and command-line tools, not for
-- authorization checking. This operation may \"fail open\" without
-- warning.
--
-- /See:/ 'projectsAttestorsTestIAMPermissions' smart constructor.
data ProjectsAttestorsTestIAMPermissions =
  ProjectsAttestorsTestIAMPermissions'
    { _patipXgafv :: !(Maybe Xgafv)
      -- ^ V1 error format (exposed through the 'patipXgafv' lens).
    , _patipUploadProtocol :: !(Maybe Text)
      -- ^ Upload protocol for media (see 'patipUploadProtocol').
    , _patipAccessToken :: !(Maybe Text)
      -- ^ OAuth access token (see 'patipAccessToken').
    , _patipUploadType :: !(Maybe Text)
      -- ^ Legacy upload protocol for media (see 'patipUploadType').
    , _patipPayload :: !TestIAMPermissionsRequest
      -- ^ The request body (see 'patipPayload').
    , _patipResource :: !Text
      -- ^ The resource the policy detail is requested for (see 'patipResource').
    , _patipCallback :: !(Maybe Text)
      -- ^ JSONP callback (see 'patipCallback').
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ProjectsAttestorsTestIAMPermissions' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'patipXgafv'
--
-- * 'patipUploadProtocol'
--
-- * 'patipAccessToken'
--
-- * 'patipUploadType'
--
-- * 'patipPayload'
--
-- * 'patipResource'
--
-- * 'patipCallback'
projectsAttestorsTestIAMPermissions
    :: TestIAMPermissionsRequest -- ^ 'patipPayload'
    -> Text -- ^ 'patipResource'
    -> ProjectsAttestorsTestIAMPermissions
projectsAttestorsTestIAMPermissions payload resource =
  ProjectsAttestorsTestIAMPermissions'
    { _patipXgafv = Nothing
    , _patipUploadProtocol = Nothing
    , _patipAccessToken = Nothing
    , _patipUploadType = Nothing
    , _patipPayload = payload
    , _patipResource = resource
    , _patipCallback = Nothing
    }
-- | V1 error format.
patipXgafv :: Lens' ProjectsAttestorsTestIAMPermissions (Maybe Xgafv)
patipXgafv = lens _patipXgafv (\s a -> s {_patipXgafv = a})

-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
patipUploadProtocol :: Lens' ProjectsAttestorsTestIAMPermissions (Maybe Text)
patipUploadProtocol = lens _patipUploadProtocol (\s a -> s {_patipUploadProtocol = a})

-- | OAuth access token.
patipAccessToken :: Lens' ProjectsAttestorsTestIAMPermissions (Maybe Text)
patipAccessToken = lens _patipAccessToken (\s a -> s {_patipAccessToken = a})

-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
patipUploadType :: Lens' ProjectsAttestorsTestIAMPermissions (Maybe Text)
patipUploadType = lens _patipUploadType (\s a -> s {_patipUploadType = a})

-- | Multipart request metadata.
patipPayload :: Lens' ProjectsAttestorsTestIAMPermissions TestIAMPermissionsRequest
patipPayload = lens _patipPayload (\s a -> s {_patipPayload = a})

-- | REQUIRED: The resource for which the policy detail is being requested.
-- See the operation documentation for the appropriate value for this
-- field.
patipResource :: Lens' ProjectsAttestorsTestIAMPermissions Text
patipResource = lens _patipResource (\s a -> s {_patipResource = a})

-- | JSONP
patipCallback :: Lens' ProjectsAttestorsTestIAMPermissions (Maybe Text)
patipCallback = lens _patipCallback (\s a -> s {_patipCallback = a})
-- | How to execute a 'ProjectsAttestorsTestIAMPermissions' request: its
-- response type, the OAuth scope it needs, and the client builder that
-- feeds each record field into the matching slot of the REST resource.
instance GoogleRequest
           ProjectsAttestorsTestIAMPermissions
         where
        type Rs ProjectsAttestorsTestIAMPermissions =
             TestIAMPermissionsResponse
        type Scopes ProjectsAttestorsTestIAMPermissions =
             '["https://www.googleapis.com/auth/cloud-platform"]
        requestClient
          ProjectsAttestorsTestIAMPermissions'{..}
          -- Arguments are passed in the order the resource type declares
          -- its path capture, query parameters and body.
          = go _patipResource _patipXgafv _patipUploadProtocol
              _patipAccessToken
              _patipUploadType
              _patipCallback
              (Just AltJSON)
              _patipPayload
              binaryAuthorizationService
          where go
                  = buildClient
                      (Proxy ::
                         Proxy ProjectsAttestorsTestIAMPermissionsResource)
                      mempty
| brendanhay/gogol | gogol-binaryauthorization/gen/Network/Google/Resource/BinaryAuthorization/Projects/Attestors/TestIAMPermissions.hs | mpl-2.0 | 6,585 | 0 | 16 | 1,356 | 789 | 465 | 324 | 120 | 1 |
-- 8)
-- | True when the list reads the same forwards and backwards.
isPalindrome :: (Eq a) => [a] -> Bool
isPalindrome xs = xs == reverse xs
-- 9)
-- | Absolute value, written by hand with guards.
myAbs :: Integer -> Integer
myAbs n
  | n < 0 = negate n
  | otherwise = n
-- 10)
-- | Regroup two pairs: the second components together, then the firsts.
f :: (a, b) -> (c, d) -> ((b, d), (a, c))
f p q = ((snd p, snd q), (fst p, fst q))
| ocozalp/Haskellbook | chapter4/exercises.hs | unlicense | 291 | 0 | 9 | 90 | 185 | 103 | 82 | 10 | 2 |
{-# LANGUAGE ViewPatterns #-}
import qualified Data.ByteString.Char8 as C
import qualified Data.IntMap.Strict as IntMap
import Data.IntMap.Strict(IntMap)
import Data.Maybe
-- | Fold one cut point @x@ into the interval map @m@ (keys are interval
-- starts, values their ends).  Only the interval starting strictly below
-- @x@ can be affected: depending on whether @x@ lies within @d@ of each
-- end, that interval is dropped, replaced by one shortened half, or
-- split in two.  When no key lies below @x@, or @x@ is at or past the
-- interval's end, the map is returned unchanged.
-- NOTE(review): the first parameter @d@ is unused in the first equation
-- but required so both equations share one arity.
mergeHelper d x m@(IntMap.lookupLT x -> Nothing) = m
mergeHelper d x m@(IntMap.lookupLT x -> Just (k, v))
  -- x lies at/after the interval's end: nothing to do.
  | x >= v = m
  -- Within d of both ends: the whole interval is consumed.
  | x - k <= d && v - x <= d = IntMap.delete k m
  -- Within d of the start only: keep the right half (x, v).
  | x - k <= d = IntMap.insert x v . IntMap.delete k $ m
  -- Within d of the end only: keep the left half (k, x).
  | v - x <= d = IntMap.insert k x . IntMap.delete k $ m
  -- Far from both ends: split into (k, x) and (x, v).
  | otherwise = IntMap.insert k x . IntMap.insert x v . IntMap.delete k $ m
-- | Fold each time-step's list of cut points into the interval map,
-- which starts as the single interval (0, w).  The accumulator carries
-- (step count, finished flag); once the map empties the flag is set and
-- further steps are ignored.  Returns the step count at which the map
-- emptied, or -1 if it never did.
-- NOTE(review): the first parameter (the interval count) is unused here;
-- the step lists themselves drive the fold -- confirm against callers.
claw :: Int -> Int -> Int -> [[Int]] -> Int
claw n w d = (\(r, b) -> if b then r else -1) . fst . foldl go ((0, False), IntMap.singleton 0 w)
  where go done@( (_, True), _ ) _ = done
        go notyet@( (t, False), v) s = ( (succ t, IntMap.null v'), v')
          where v'= foldl (flip (mergeHelper d)) v s
-- | Parse a leading decimal integer from a ByteString.  Replaces the
-- original partial 'fromJust' (whose failure message is an opaque
-- "Maybe.fromJust: Nothing") with an explicit diagnostic; behaviour on
-- valid input is unchanged.
readint :: C.ByteString -> Int
readint s = maybe (error "readint: input does not start with an integer") fst (C.readInt s)
-- | Process @k@ test cases from the remaining input lines.  Each case's
-- header line carries the interval count, width and capacity; the next
-- @intervals@ lines each start with a count followed by the stone
-- positions themselves (the leading count is discarded by 'tail').
processinputs 0 _ = return ()
processinputs k (h:s) = print (claw intervals width cap stones) >> processinputs (pred k) s2
  where (intervals:width:cap:_) = map readint (C.words h)
        (s1, s2) = splitAt intervals s
        stones = map readstones s1
          where readstones = tail . map readint . C.words
-- | Read the test-case count from the first line, then hand the
-- remaining lines to 'processinputs'.
processall input = processinputs (readint countLine) bodyLines
  where (countLine : bodyLines) = C.lines input
-- | Entry point: feed all of stdin to 'processall'.
main = processall =<< C.getContents
| wangbj/haskell | claw.hs | bsd-2-clause | 1,312 | 0 | 13 | 309 | 664 | 345 | 319 | 27 | 3 |
module Day11 (run, moves) where
import Prelude hiding (zip)
import Control.Lens
import Data.Bits
import Data.Char
import Data.List
import qualified Data.Sequence as DSQ
import Data.Maybe
import Data.Ord
import qualified Data.Set as DSE
import Data.Time.Clock
import Numeric
-- | Number of floors in the building; valid floors are 0 .. topFloor - 1.
topFloor = 4
-- | A search node: the elevator's floor, one contents bitmask per
-- floor, and the number of moves taken to reach this configuration.
data State = State { efloor :: Int
                   , floorContents :: [Int]
                   , prevMoves :: Int
                   } deriving Show

-- Equality and ordering deliberately ignore 'prevMoves', so the visited
-- set collapses states reached via paths of different lengths.
instance Eq State where
  State f fs _ == State f' fs' _ = (f, fs) == (f', fs')

instance Ord State where
  compare (State f fs _) (State f' fs' _) =
    case compare f f' of
      EQ -> compare fs fs'
      unequal -> unequal
-- | The elevator's payload for one move, encoded as a bitmask over a
-- floor's item bits.
data Move = Move Int deriving Eq
-- Shown as the raw mask in binary, for debugging.
instance Show Move where
  show (Move m)= showIntAtBase 2 intToDigit m ""
-- | Unwrap the underlying bitmask.
getMove :: Move -> Int
getMove (Move m) = m
-- | Every payload the elevator can carry: any single item or unordered
-- pair of items, formed by OR-ing two single-bit masks (a .|. a gives
-- the singletons; duplicates are removed by 'nub').
moves :: [Move]
moves = nub [Move (a .|. b) | a <- singles, b <- singles]
  where
    singles = [shift 1 i | i <- [0 .. floorSize * 2 - 1]]
-- | Check that a proposed (destination floor, payload) pair is legal:
-- the payload bits must all be present on the current floor, and both
-- the source and the destination floor must remain safe afterwards.
-- Fix: the original bound an unused local @moveMask@ (and an unused
-- tuple binder @i@); both are removed.
validMove :: State -> (Int, Move) -> Bool
validMove s m = ((from `xor` m') .&. m' == 0) && (validFloor (fc' !! f) && validFloor (fc' !! f'))
  where
    -- State after applying the move; fc' is the updated floor list.
    moved = doMove s m
    (State f fc _) = s
    (State f' fc' _) = moved
    (_, Move m') = m
    -- Contents of the floor we are leaving; (from `xor` m') .&. m' == 0
    -- holds exactly when every bit of m' is set in from.
    from = fc !! f
-- | A floor mask is safe when its two halves are compatible: either the
-- high half is empty, or every bit set in the low half is also set in
-- the high half.  NOTE(review): presumably the high half encodes
-- generators and the low half microchips (AoC day 11) -- confirm
-- against the encoding used in 'emptyState'.
validFloor :: Int -> Bool
validFloor f = fg == 0 || ((fm `xor` fg) .&. fm == 0)
  where
    -- High floorSize bits of the mask.
    fg = shift f (-floorSize)
    -- Low floorSize bits of the mask.
    fm = f .&. (shift 1 floorSize - 1)
-- | All legal (destination floor, payload) pairs from a state: one
-- floor up or down (clamped to the building), paired with every
-- candidate payload and filtered through 'validMove'.
validMoves :: State -> [(Int, Move)]
validMoves s = filter (validMove s) candidates
  where
    (State fl _ _) = s
    neighbours = [f | f <- [fl - 1, fl + 1], f >= 0, f < topFloor]
    candidates = [(f, m) | f <- neighbours, m <- moves]
-- | Apply a move: subtract the payload value from the current floor's
-- mask, add it to the destination floor's mask (via lens 'ix' updates,
-- applied sequentially), move the elevator there, and bump the move
-- counter.
doMove :: State -> (Int, Move) -> State
doMove s (i, (Move m)) = State i (fc & ix e -~ m & ix i +~ m) (p + 1)
  where
    (State e fc p) = s
-- | Goal test: the top floor holds every item, i.e. all 2*floorSize
-- mask bits are set.
checkState :: State -> Bool
checkState (State _ contents _) = last contents == fullFloor
  where
    fullFloor = shift 1 (floorSize * 2) - 1
-- | Wrap a value in 'Just' when it satisfies the predicate, else 'Nothing'.
putMaybe :: (a -> Bool) -> a -> Maybe a
putMaybe p x
  | p x = Just x
  | otherwise = Nothing
-- emptyState = State 0 (bin2dec <$> ["1111111010", "0000000101", "0000000000", "0000000000"]) 0
-- | Initial search state: elevator on floor 0, zero moves taken, floor
-- contents given as binary masks (see 'floorSize' for the bit layout).
-- The commented-out line above is an alternative, smaller layout
-- (5 item pairs); presumably the puzzle's part-one input -- confirm.
emptyState = State 0 (bin2dec <$> ["11111111111010", "00000000000101", "00000000000000", "00000000000000"]) 0
-- | Items come in pairs; each floor's contents are encoded in
-- 2 * floorSize bits (two halves of floorSize bits each).
floorSize = 7
-- | Interpret a string of binary digits (most significant first) as an
-- Int; any character other than '0' counts as a 1 bit.
-- Fix: the original @foldr ... . reverse . map c2i@ built an
-- intermediate reversed list and a thunk chain; a direct strict left
-- fold computes the same value in one pass.
bin2dec :: String -> Int
bin2dec = foldl' step 0
  where
    step acc c = acc * 2 + (if c == '0' then 0 else 1)
-- | Breadth-first search (FIFO queue) over states.
-- First equation: both the visited set and the queue are empty, so seed
-- the search with 'emptyState''s successors and recurse.
-- Second equation: pop the queue head; if it passes 'checkState' return
-- it, otherwise mark all successors visited and enqueue only the ones
-- not already in the visited set.
solve :: (DSE.Set State, DSQ.Seq State) -> State
solve (DSE.null -> True, DSQ.null -> True) = solve (foldr DSE.insert DSE.empty nextStates, DSQ.zipWith doMove (DSQ.replicate (length nextMoves) emptyState) (DSQ.fromList nextMoves))
  where
    nextStates = DSQ.zipWith doMove (DSQ.replicate (length nextMoves) emptyState) (DSQ.fromList nextMoves)
    nextMoves = validMoves emptyState
solve (ss, (DSQ.viewl -> (s DSQ.:< sms)))
  | checkState s = s
  | otherwise =
    let
      nextStates = DSQ.zipWith doMove (DSQ.replicate (length nextMoves) s) (DSQ.fromList nextMoves)
      nextMoves = validMoves s
    in
      solve (foldr DSE.insert ss nextStates, sms DSQ.>< ((DSQ.filter (not . flip DSE.member ss) nextStates)))
-- | Solve the puzzle, printing a timestamp before and after so the
-- elapsed time can be read off the output.
run :: IO ()
run = do
  started <- getCurrentTime
  print started
  print (solve (DSE.empty, DSQ.empty))
  finished <- getCurrentTime
  print finished
| ulyssesp/AoC | src/day11.hs | bsd-3-clause | 3,220 | 0 | 17 | 854 | 1,362 | 732 | 630 | -1 | -1 |
-----------------------------------------------------------------------------
-- |
-- Module : Control.Exception
-- Copyright : (c) The University of Glasgow 2001
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : non-portable
--
-- This module provides support for raising and catching both built-in
-- and user-defined exceptions.
--
-----------------------------------------------------------------------------
module Control.Exception (
-- * The Exception type
Exception(..), -- instance Eq, Ord, Show, Typeable
IOException, -- instance Eq, Ord, Show, Typeable
ArithException(..), -- instance Eq, Ord, Show, Typeable
ArrayException(..), -- instance Eq, Ord, Show, Typeable
AsyncException(..), -- instance Eq, Ord, Show, Typeable
-- * Throwing exceptions
throwIO, -- :: Exception -> IO a
throw, -- :: Exception -> a
ioError, -- :: IOError -> IO a
-- * Catching Exceptions
-- |There are several functions for catching and examining
-- exceptions; all of them may only be used from within the
-- 'IO' monad.
-- ** The @catch@ functions
catch, -- :: IO a -> (Exception -> IO a) -> IO a
catchJust, -- :: (Exception -> Maybe b) -> IO a -> (b -> IO a) -> IO a
-- ** The @handle@ functions
handle, -- :: (Exception -> IO a) -> IO a -> IO a
handleJust,-- :: (Exception -> Maybe b) -> (b -> IO a) -> IO a -> IO a
-- ** The @try@ functions
try, -- :: IO a -> IO (Either Exception a)
tryJust, -- :: (Exception -> Maybe b) -> a -> IO (Either b a)
-- ** The @evaluate@ function
evaluate, -- :: a -> IO a
-- ** The @mapException@ function
mapException, -- :: (Exception -> Exception) -> a -> a
-- ** Exception predicates
-- $preds
ioErrors, -- :: Exception -> Maybe IOError
arithExceptions, -- :: Exception -> Maybe ArithException
errorCalls, -- :: Exception -> Maybe String
dynExceptions, -- :: Exception -> Maybe Dynamic
assertions, -- :: Exception -> Maybe String
asyncExceptions, -- :: Exception -> Maybe AsyncException
userErrors, -- :: Exception -> Maybe String
-- * Dynamic exceptions
-- $dynamic
throwDyn, -- :: Typeable ex => ex -> b
catchDyn, -- :: Typeable ex => IO a -> (ex -> IO a) -> IO a
-- * Asynchronous Exceptions
-- $async
-- ** Asynchronous exception control
-- |The following two functions allow a thread to control delivery of
-- asynchronous exceptions during a critical region.
block, -- :: IO a -> IO a
unblock, -- :: IO a -> IO a
-- *** Applying @block@ to an exception handler
-- $block_handler
-- *** Interruptible operations
-- $interruptible
-- * Assertions
assert, -- :: Bool -> a -> a
-- * Utilities
bracket, -- :: IO a -> (a -> IO b) -> (a -> IO c) -> IO ()
bracket_, -- :: IO a -> IO b -> IO c -> IO ()
finally, -- :: IO a -> IO b -> IO a
) where
import Hugs.Exception as ExceptionBase
import Prelude hiding ( catch )
import System.IO.Error hiding ( catch, try )
import System.IO.Unsafe (unsafePerformIO)
import Data.Dynamic
-- Hand-written 'Typeable' instances for the exception types, using the
-- legacy 'mkTyCon'\/'mkAppTy' API from "Data.Dynamic" (this module
-- predates derivable 'Typeable').
exceptionTc = mkTyCon "Exception"; instance Typeable Exception where { typeOf _ = mkAppTy exceptionTc [] }
ioExceptionTc = mkTyCon "IOException"; instance Typeable IOException where { typeOf _ = mkAppTy ioExceptionTc [] }
arithExceptionTc = mkTyCon "ArithException"; instance Typeable ArithException where { typeOf _ = mkAppTy arithExceptionTc [] }
arrayExceptionTc = mkTyCon "ArrayException"; instance Typeable ArrayException where { typeOf _ = mkAppTy arrayExceptionTc [] }
asyncExceptionTc = mkTyCon "AsyncException"; instance Typeable AsyncException where { typeOf _ = mkAppTy asyncExceptionTc [] }
-----------------------------------------------------------------------------
-- Catching exceptions
-- |This is the simplest of the exception-catching functions. It
-- takes a single argument, runs it, and if an exception is raised
-- the \"handler\" is executed, with the value of the exception passed as an
-- argument. Otherwise, the result is returned as normal. For example:
--
-- > catch (openFile f ReadMode)
-- > (\e -> hPutStr stderr (\"Couldn\'t open \"++f++\": \" ++ show e))
--
-- For catching exceptions in pure (non-'IO') expressions, see the
-- function 'evaluate'.
--
-- Note that due to Haskell\'s unspecified evaluation order, an
-- expression may return one of several possible exceptions: consider
-- the expression @error \"urk\" + 1 \`div\` 0@. Does
-- 'catch' execute the handler passing
-- @ErrorCall \"urk\"@, or @ArithError DivideByZero@?
--
-- The answer is \"either\": 'catch' makes a
-- non-deterministic choice about which exception to catch. If you
-- call it again, you might get a different exception back. This is
-- ok, because 'catch' is an 'IO' computation.
--
-- Note that 'catch' catches all types of exceptions, and is generally
-- used for \"cleaning up\" before passing on the exception using
-- 'throwIO'. It is not good practice to discard the exception and
-- continue, without first checking the type of the exception (it
-- might be a 'ThreadKilled', for example). In this case it is usually better
-- to use 'catchJust' and select the kinds of exceptions to catch.
--
-- Also note that The "Prelude" also exports a
-- function called 'catch' which has the same type as
-- 'Control.Exception.catch', the difference being that the
-- "Prelude" version only catches the IO and user
-- families of exceptions (as required by Haskell 98). We recommend
-- either hiding the "Prelude" version of
-- 'catch' when importing
-- "Control.Exception", or importing
-- "Control.Exception" qualified, to avoid name-clashes.
catch :: IO a -- ^ The computation to run
      -> (Exception -> IO a) -- ^ Handler invoked if an exception is raised
      -> IO a
catch io handler = ExceptionBase.catchException io handler
-- | The function 'catchJust' is like 'catch', but it takes an extra
-- argument which is an /exception predicate/, a function which
-- selects which type of exceptions we\'re interested in. There are
-- some predefined exception predicates for useful subsets of
-- exceptions: 'ioErrors', 'arithExceptions', and so on. For example,
-- to catch just calls to the 'error' function, we could use
--
-- > result <- catchJust errorCalls thing_to_try handler
--
-- Any other exceptions which are not matched by the predicate
-- are re-raised, and may be caught by an enclosing
-- 'catch' or 'catchJust'.
catchJust
        :: (Exception -> Maybe b) -- ^ Predicate to select exceptions
        -> IO a                   -- ^ Computation to run
        -> (b -> IO a)            -- ^ Handler
        -> IO a
catchJust p a handler = catch a onErr
  -- Re-throw exceptions the predicate rejects; handle the rest.
  where onErr e = maybe (throw e) handler (p e)
-- | A version of 'catch' with the arguments swapped around; useful in
-- situations where the code for the handler is shorter. For example:
--
-- > do handle (\e -> exitWith (ExitFailure 1)) $
-- >    ...
handle :: (Exception -> IO a) -> IO a -> IO a
handle handler io = catch io handler
-- | A version of 'catchJust' with the arguments swapped around (see
-- 'handle').
handleJust :: (Exception -> Maybe b) -> (b -> IO a) -> IO a -> IO a
handleJust p handler io = catchJust p io handler
-----------------------------------------------------------------------------
-- evaluate
-- | Forces its argument to be evaluated, and returns the result in
-- the 'IO' monad. It can be used to order evaluation with respect to
-- other 'IO' operations; its semantics are given by
--
-- > evaluate undefined `seq` return () ==> return ()
-- > catch (evaluate undefined) (\e -> return ()) ==> return ()
--
-- NOTE: @(evaluate a)@ is /not/ the same as @(a \`seq\` return a)@.
-----------------------------------------------------------------------------
-- 'mapException'
-- | This function maps one exception into another as proposed in the
-- paper \"A semantics for imprecise exceptions\".
-- Notice that the usage of 'unsafePerformIO' is safe here.
mapException :: (Exception -> Exception) -> a -> a
mapException f v =
  unsafePerformIO (catch (evaluate v) (throw . f))
-----------------------------------------------------------------------------
-- 'try' and variations.
-- | Similar to 'catch', but returns an 'Either' result which is
-- @(Right a)@ if no exception was raised, or @(Left e)@ if an
-- exception was raised and its value is @e@.
--
-- > try a = catch (Right \`liftM\` a) (return . Left)
--
-- Note: as with 'catch', it is only polite to use this variant if you intend
-- to re-throw the exception after performing whatever cleanup is needed.
-- Otherwise, 'tryJust' is generally considered to be better.
--
try :: IO a -> IO (Either Exception a)
try a = catch (fmap Right a) (return . Left)
-- | A variant of 'try' that takes an exception predicate to select
-- which exceptions are caught (c.f. 'catchJust'). If the exception
-- does not match the predicate, it is re-thrown.
tryJust :: (Exception -> Maybe b) -> IO a -> IO (Either b a)
tryJust p a = do
  r <- try a
  case r of
    Right v -> return (Right v)
    -- Exceptions the predicate rejects are re-thrown.
    Left e  -> maybe (throw e) (return . Left) (p e)
-----------------------------------------------------------------------------
-- Dynamic exceptions
-- $dynamic
-- #DynamicExceptions# Because the 'Exception' datatype is not extensible, there is an
-- interface for throwing and catching exceptions of type 'Dynamic'
-- (see "Data.Dynamic") which allows exception values of any type in
-- the 'Typeable' class to be thrown and caught.
-- | Raise any value as an exception, provided it is in the
-- 'Typeable' class.
throwDyn :: Typeable exception => exception -> b
throwDyn = throw . DynException . toDyn
-- | Catch dynamic exceptions of the required type. All other
-- exceptions are re-thrown, including dynamic exceptions of the wrong
-- type.
--
-- When using dynamic exceptions it is advisable to define a new
-- datatype to use for your exception type, to avoid possible clashes
-- with dynamic exceptions used in other libraries.
--
catchDyn :: Typeable exception => IO a -> (exception -> IO a) -> IO a
catchDyn m k = catchException m handler
  where
    -- Only a DynException carrying a value of the expected type is
    -- handled; everything else propagates.
    handler ex@(DynException dyn) =
      case fromDynamic dyn of
        Just exception -> k exception
        Nothing        -> throw ex
    handler ex = throw ex
-----------------------------------------------------------------------------
-- Exception Predicates
-- $preds
-- These pre-defined predicates may be used as the first argument to
-- 'catchJust', 'tryJust', or 'handleJust' to select certain common
-- classes of exceptions.
-- Each predicate selects one family of exceptions for use with
-- 'catchJust', 'tryJust' and 'handleJust'.
ioErrors :: Exception -> Maybe IOError
ioErrors (IOException e) = Just e
ioErrors _ = Nothing

arithExceptions :: Exception -> Maybe ArithException
arithExceptions (ArithException e) = Just e
arithExceptions _ = Nothing

errorCalls :: Exception -> Maybe String
errorCalls (ErrorCall e) = Just e
errorCalls _ = Nothing

assertions :: Exception -> Maybe String
assertions (AssertionFailed e) = Just e
assertions _ = Nothing

dynExceptions :: Exception -> Maybe Dynamic
dynExceptions (DynException e) = Just e
dynExceptions _ = Nothing

asyncExceptions :: Exception -> Maybe AsyncException
asyncExceptions (AsyncException e) = Just e
asyncExceptions _ = Nothing

userErrors :: Exception -> Maybe String
userErrors (IOException e) | isUserError e = Just (ioeGetErrorString e)
userErrors _ = Nothing
-----------------------------------------------------------------------------
-- Some Useful Functions
-- | When you want to acquire a resource, do some work with it, and
-- then release the resource, it is a good idea to use 'bracket',
-- because 'bracket' will install the necessary exception handler to
-- release the resource in the event that an exception is raised
-- during the computation. If an exception is raised, then 'bracket' will
-- re-raise the exception (after performing the release).
--
-- A common example is opening a file:
--
-- > bracket
-- > (openFile "filename" ReadMode)
-- > (hClose)
-- > (\handle -> do { ... })
--
-- The arguments to 'bracket' are in this order so that we can partially apply
-- it, e.g.:
--
-- > withFile name = bracket (openFile name) hClose
--
bracket
        :: IO a -- ^ computation to run first (\"acquire resource\")
        -> (a -> IO b) -- ^ computation to run last (\"release resource\")
        -> (a -> IO c) -- ^ computation to run in-between
        -> IO c -- returns the value from the in-between computation
bracket before after thing =
  block (do
    a <- before
    -- Only the in-between computation runs with async exceptions
    -- unblocked; if it throws, release the resource, then re-throw.
    r <- catch
           (unblock (thing a))
           (\e -> do { after a; throw e })
    -- Normal path: release once the in-between computation finished.
    after a
    return r
 )
-- | A specialised variant of 'bracket' with just a computation to run
-- afterward.
--
finally :: IO a -- ^ computation to run first
        -> IO b -- ^ computation to run afterward (even if an exception
                -- was raised)
        -> IO a -- returns the value from the first computation
a `finally` sequel =
  block (do
    -- Run the main computation interruptibly; if it throws, run the
    -- sequel and re-throw.
    r <- catch
           (unblock a)
           (\e -> do { sequel; throw e })
    -- Normal path: the sequel runs exactly once here.
    sequel
    return r
  )
-- | A variant of 'bracket' where the return value from the first computation
-- is not required.
bracket_ :: IO a -> IO b -> IO c -> IO c
bracket_ before after thing = bracket before (\_ -> after) (\_ -> thing)
-- -----------------------------------------------------------------------------
-- Asynchronous exceptions
{- $async
#AsynchronousExceptions# Asynchronous exceptions are so-called because they arise due to
external influences, and can be raised at any point during execution.
'StackOverflow' and 'HeapOverflow' are two examples of
system-generated asynchronous exceptions.
The primary source of asynchronous exceptions, however, is
'throwTo':
> throwTo :: ThreadId -> Exception -> IO ()
'throwTo' (also 'throwDynTo' and 'Control.Concurrent.killThread') allows one
running thread to raise an arbitrary exception in another thread. The
exception is therefore asynchronous with respect to the target thread,
which could be doing anything at the time it receives the exception.
Great care should be taken with asynchronous exceptions; it is all too
easy to introduce race conditions by the over zealous use of
'throwTo'.
-}
{- $block_handler
There\'s an implied 'block' around every exception handler in a call
to one of the 'catch' family of functions. This is because that is
what you want most of the time - it eliminates a common race condition
in starting an exception handler, because there may be no exception
handler on the stack to handle another exception if one arrives
immediately. If asynchronous exceptions are blocked on entering the
handler, though, we have time to install a new exception handler
before being interrupted. If this weren\'t the default, one would have
to write something like
> block (
> catch (unblock (...))
> (\e -> handler)
> )
If you need to unblock asynchronous exceptions again in the exception
handler, just use 'unblock' as normal.
Note that 'try' and friends /do not/ have a similar default, because
there is no exception handler in this case. If you want to use 'try'
in an asynchronous-exception-safe way, you will need to use
'block'.
-}
{- $interruptible
Some operations are /interruptible/, which means that they can receive
asynchronous exceptions even in the scope of a 'block'. Any function
which may itself block is defined as interruptible; this includes
'Control.Concurrent.MVar.takeMVar'
(but not 'Control.Concurrent.MVar.tryTakeMVar'),
and most operations which perform
some I\/O with the outside world. The reason for having
interruptible operations is so that we can write things like
> block (
> a <- takeMVar m
> catch (unblock (...))
> (\e -> ...)
> )
if the 'Control.Concurrent.MVar.takeMVar' was not interruptible,
then this particular
combination could lead to deadlock, because the thread itself would be
blocked in a state where it can\'t receive any asynchronous exceptions.
With 'Control.Concurrent.MVar.takeMVar' interruptible, however, we can be
safe in the knowledge that the thread can receive exceptions right up
until the point when the 'Control.Concurrent.MVar.takeMVar' succeeds.
Similar arguments apply for other interruptible operations like
'System.IO.openFile'.
-}
-- -----------------------------------------------------------------------------
-- Assert
-- | @assert cond val@ returns @val@ when @cond@ holds and otherwise
-- raises an 'AssertionFailed' exception (with an empty message).
assert :: Bool -> a -> a
assert cond val
  | cond      = val
  | otherwise = throw (AssertionFailed "")
| OS2World/DEV-UTIL-HUGS | libraries/Control/Exception.hs | bsd-3-clause | 16,620 | 199 | 15 | 3,249 | 1,863 | 1,084 | 779 | 131 | 3 |
{-# OPTIONS_GHC -fno-warn-unused-binds -fno-warn-unused-matches -fno-warn-name-shadowing -fno-warn-missing-signatures #-}
{-# LANGUAGE FlexibleInstances, MultiParamTypeClasses, UndecidableInstances, FlexibleContexts, TypeSynonymInstances #-}
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
-- |
-- | Module : Fequency Moments
-- | Creator: Xiao Ling
-- | Created: 12/17/2015
-- |
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
module FeqMoments where
import Prelude hiding (replicate)
import Control.Monad.Random.Class
import Control.Monad.Random
import Control.Monad.State
import Data.Conduit
import Data.Foldable (toList)
import qualified Data.Sequence as S
import qualified Data.Conduit.List as Cl
import Data.Sequence (Seq,(|>),update,empty)
import Core
import Statistics
{-----------------------------------------------------------------------------
Types
------------------------------------------------------------------------------}
{-----------------------------------------------------------------------------
Approximate Median
------------------------------------------------------------------------------}
| lingxiao/CIS700 | src/FeqMoments.hs | bsd-3-clause | 1,456 | 0 | 5 | 112 | 104 | 75 | 29 | 14 | 0 |
{-# LANGUAGE MagicHash #-}
module Main where
import GHC.Prim
import GHC.Int
-- Test that large unchecked shifts, which constitute undefined behavior, do
-- not crash the compiler and instead evaluate to 0.
-- See Note [Guarding against silly shifts] in PrelRules.
-- Shift should be larger than the word size (e.g. 64 on 64-bit) for this test.
main = print (I# (uncheckedIShiftL# 1# 1000#))
| sdiehl/ghc | testsuite/tests/codeGen/should_run/T16449_2.hs | bsd-3-clause | 395 | 0 | 9 | 68 | 41 | 25 | 16 | 5 | 1 |
-- Copyright (c) 2016-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the BSD-style license found in the
-- LICENSE file in the root directory of this source tree.
{-# LANGUAGE OverloadedStrings #-}
module Duckling.Volume.KM.Corpus
( corpus ) where
import Data.String
import Prelude
import Duckling.Locale
import Duckling.Resolve
import Duckling.Volume.Types
import Duckling.Testing.Types
-- | Test context pinned to the Khmer (KM) locale.
context :: Context
context = testContext {locale = kmLocale}
  where
    kmLocale = makeLocale KM Nothing
-- | The Khmer volume corpus: locale context, default test options, and
-- the example set defined below.
corpus :: Corpus
corpus = (context, testOptions, allExamples)
-- | Khmer surface forms paired with the volume values they must parse
-- to. Groups cover exact litres/millilitres, fractions, ranges
-- ('between') and one-sided bounds ('under' / 'above').
allExamples :: [Example]
allExamples = concat
  [ examples (simple Litre 1)
             [ "1 លីត្រ"
             , "1l"
             , "១លីត្រ"
             ]
  , examples (simple Litre 0.5)
             [ "កន្លះលីត្រ"
             , "១/២លីត្រ"
             ]
  , examples (simple Litre 0.25)
             [ "មួយភាគបួនលីត្រ"
             , "១/៤លីត្រ"
             ]
  , examples (simple Millilitre 1)
             [ "1 មីលីលីត្រ"
             , "1ml"
             , "១មីលីលីត្រ"
             ]
  , examples (between Litre (2,7))
             [ "ចាប់ពី 2 ដល់ 7 l"
             , "ចន្លោះពី ២ ដល់ ៧លីត្រ"
             , "ចន្លោះ ២លីត្រ និង ៧លីត្រ"
             , "ប្រហែល ២-៧ លីត្រ"
             , "~2-7លីត្រ"
             ]
  , examples (under Millilitre 500)
             [ "តិចជាងប្រាំរយមីលីលីត្រ"
             , "មិនលើសប្រាំរយមីលីលីត្រ"
             , "ក្រោមប្រាំរយមីលីលីត្រ"
             , "យ៉ាងច្រើនប្រាំរយមីលីលីត្រ"
             ]
  , examples (above Millilitre 500)
             [ "ច្រើនជាងប្រាំរយមីលីលីត្រ"
             , "មិនតិចជាងប្រាំរយមីលីលីត្រ"
             , "លើសពីប្រាំរយមីលីលីត្រ"
             , "យ៉ាងតិចប្រាំរយមីលីលីត្រ"
             ]
  ]
| facebookincubator/duckling | Duckling/Volume/KM/Corpus.hs | bsd-3-clause | 2,362 | 0 | 10 | 599 | 294 | 174 | 120 | 45 | 1 |
module Main where
import Avail
import Control.Monad.IO.Class
import DynFlags
(getDynFlags, parseDynamicFlagsCmdLine, defaultFatalMessager, defaultFlushOut)
import GHC
import GHC.Fingerprint.Type
import GHC.Hs.Decls
import GHC.Hs.Doc
import GHC.Hs.Expr
import GHC.Hs.Extension
import GHC.Hs.ImpExp
import HscTypes
import Outputable
import Plugins
import System.Environment
import TcRnTypes
import Simple.SourcePlugin (plugin)
-- | Drive a sequence of loads of the same module while swapping the
-- static plugin's recompilation policy, printing a marker before each
-- load so the test output shows which loads recompiled.
main = do
  libdir:args <- getArgs
  defaultErrorHandler defaultFatalMessager defaultFlushOut $ do
    runGhc (Just libdir) $ do
      dflags <- getSessionDynFlags
      -- liftIO $ print args
      -- (dflags,_,_)
      --   <- parseDynamicFlagsCmdLine dflags (map noLoc args)
      -- we need to LinkInMemory otherwise `setTarget [] >> load LoadAllTargets`
      -- below will fail.
      setSessionDynFlags dflags { ghcLink = LinkInMemory}

      -- Start with a pure plugin, this should trigger recomp.
      liftIO $ putStrLn "==pure.0"
      loadWithPlugins [StaticPlugin $ PluginWithArgs plugin0_pure []]

      -- The same (or a different) pure plugin shouldn't trigger recomp.
      liftIO $ putStrLn "==pure.1"
      loadWithPlugins [StaticPlugin $ PluginWithArgs plugin0_pure []]

      -- Next try with a fingerprint plugin, should trigger recomp.
      liftIO $ putStrLn "==fp0.0"
      loadWithPlugins [StaticPlugin $ PluginWithArgs plugin_fp0 []]

      -- With the same fingerprint plugin, should not trigger recomp.
      liftIO $ putStrLn "==fp0.1"
      loadWithPlugins [StaticPlugin $ PluginWithArgs plugin_fp0 []]

      -- Change the plugin fingerprint, should trigger recomp.
      liftIO $ putStrLn "==fp1"
      loadWithPlugins [StaticPlugin $ PluginWithArgs plugin_fp1 []]

      -- TODO: this currently doesn't work, patch pending
      -- -- Even though the plugin is now pure we should still recomp since we
      -- -- used a potentially impure plugin before
      -- liftIO $ putStrLn "pure.2"
      -- loadWithPlugins [StaticPlugin $ PluginWithArgs plugin0_pure []]
  where
    -- Unload everything (as GHCi :load does), then load the test module
    -- with the given static plugins installed.
    loadWithPlugins the_plugins = do
      -- first unload (like GHCi :load does)
      GHC.setTargets []
      _ <- GHC.load LoadAllTargets
      target <- guessTarget "static-plugins-module.hs" Nothing
      setTargets [target]
      dflags <- getSessionDynFlags
      setSessionDynFlags dflags { staticPlugins = the_plugins
                                , outputFile = Nothing }
      load LoadAllTargets
-- | The source plugin with a fixed fingerprint of (0,0); recompilation
-- decisions are keyed on this fingerprint.
plugin_fp0 =
  plugin { pluginRecompile = \_ -> pure $ MaybeRecompile $ Fingerprint 0 0 }
-- | As 'plugin_fp0' but with fingerprint (0,1), so switching between the
-- two must trigger recompilation.
plugin_fp1 =
  plugin { pluginRecompile = \_ -> pure $ MaybeRecompile $ Fingerprint 0 1 }
-- | A variant that declares itself pure (never forces recompilation).
plugin0_pure =
  plugin { pluginRecompile = \_ -> pure $ NoForceRecompile }
| sdiehl/ghc | testsuite/tests/plugins/static-plugins.hs | bsd-3-clause | 2,713 | 0 | 17 | 607 | 498 | 261 | 237 | 49 | 1 |
module Lexer where
-- Copyright 1994 by Peter Thiemann
-- Last Modified By: M. Walter
--
import Data.Char
------------------------------------------------------------------------------
--NOW the lexer
------------------------------------------------------------------------------
-- | Lexical tokens: identifiers, symbols/operators, string literals and
-- numeric literals, all carrying their source text.
data Token
     = Ident String | Symbol String | String String | Number String
-- 'Show' produces a debugging dump: each constructor gets distinctive
-- bracketing, and newline symbols are printed literally.
instance Show Token where
    showsPrec n (Ident s) = showChar '[' . showString s . showString "] "
    showsPrec n (Symbol "\n") = showString "\n"
    showsPrec n (Symbol s) = showChar '<' . showString s . showString "> "
    showsPrec n (String s) = showChar '"' . showString s . showString "\" "
    showsPrec n (Number s) = showChar ' ' . showString s . showChar ' '
    showList [] = id
    showList (x:xs) = shows x . showList xs
-- | Characters that may appear inside an identifier: letters, digits,
-- underscore and dot (dot added in 1.01).
isIdChar c = isAlpha c || isDigit c || c `elem` "_."

-- | The alphabet of symbol (operator) characters.
theSymbols = "!@#$%^&*+./<=>?\\|:"

-- | True for characters drawn from 'theSymbols'.
isSymbolChar = (`elem` theSymbols)
-- | Tokenise an input string. Newlines are emitted as 'Symbol' tokens;
-- all other whitespace is skipped. String and character literals are
-- both emitted as 'String' tokens.
lexer :: String -> [Token]
lexer "" = []
-- a double quote opens a string literal
lexer ('"':cs) = String (stchars): lexer srest
        where (stchars, srest) = lexString cs
-- a single quote opens a one-character literal
lexer ('\'':cs) = String (oneChar): lexer srest
        where (oneChar, srest) = lexChar cs
lexer ((c@'\n'):cs) = Symbol [c]: lexer cs
lexer (c:cs)
  -- isNewline is tested before isSpace on purpose: '\n' also satisfies
  -- isSpace but must become a Symbol token
  | isNewline c = Symbol [c]: lexer cs
  | isSpace c = lexer cs
  | isDot c = Ident (c:idchars): lexer irest -- 1.01
  | isAlpha c = Ident (c:idchars): lexer irest
  | isSymbolChar c = Symbol(c:sychars): lexer srest
  | isDigit c = Number (c:digits): lexer nrest
  | otherwise = Symbol([c]): lexer cs
  where (idchars, irest) = span isIdChar cs
        (sychars, srest) = span isSymbolChar cs
        (digits, nrest) = span isDigit cs
-- | Recognise the newline character.
isNewline :: Char -> Bool
isNewline = (== '\n')

-- | Recognise the dot character (identifier extension, 1.01).
isDot :: Char -> Bool
isDot = (== '.')
-- preprocessor for EBNF style comments
-- | Strip EBNF-style comments: '#' starts a comment running to the end
-- of the line (the newline itself is kept). String and character
-- literals are passed through untouched, so a '#' inside them survives.
uncomment :: String -> String
uncomment input = case input of
  ""        -> ""
  '#' : cs  -> uncomment (dropWhile (/= '\n') cs)
  '"' : cs  -> '"' : uncommentString cs
  '\'' : cs -> '\'' : uncommentChar cs
  c : cs    -> c : uncomment cs
-- | Copy a string literal's body verbatim up to (and including) its
-- closing double quote; a backslash protects the following character.
uncommentString :: String -> String
uncommentString input = case input of
  ""            -> ""
  '\\' : c : cs -> '\\' : c : uncommentString cs
  '"' : cs      -> '"' : uncomment cs
  c : cs        -> c : uncommentString cs
-- | Copy a character literal's body verbatim up to its closing single
-- quote; a backslash protects the following character.
--
-- Bug fix: the closing quote must stay a single quote. The previous
-- version emitted '"' here (a copy/paste from 'uncommentString'), which
-- left literals like 'x' as 'x" after preprocessing and broke
-- 'lexChar', whose patterns match on a literal closing (c:'\'':cs).
uncommentChar "" = ""
uncommentChar ('\\':c:cs) = '\\':c:uncommentChar cs
uncommentChar ('\'':cs) = '\'':uncomment cs
uncommentChar (c:cs) = c:uncommentChar cs
-- generic lexers
-- | Scan a character literal's body: at most one (possibly escaped)
-- character followed by the closing quote; anything else yields no
-- characters and leaves the input untouched.
lexChar :: String -> (String, String)
lexChar input = case input of
  '\\' : c : '\'' : rest -> ([c], rest)
  c : '\'' : rest        -> ([c], rest)
  rest                   -> ([], rest)
-- | Scan a string literal's body, returning (contents, rest-of-input).
-- A backslash escapes the next character (the backslash itself is
-- dropped); the closing quote is consumed but not returned. An
-- unterminated literal consumes the remaining input.
lexString :: String -> (String, String)
lexString ('\\':c:cs) = let (chars, rest) = lexString cs in (c : chars, rest)
lexString ('"':cs)    = ("", cs)
lexString ""          = ("", "")
lexString (c:cs)      = let (chars, rest) = lexString cs in (c : chars, rest)
-- | True for identifier tokens.
isIdent (Ident _ ) = True
isIdent _ = False
-- | Partial: the text of an identifier token (no match for other tokens).
getIdent (Ident s) = s
-- | True for string-literal tokens.
isString (String _) = True
isString _ = False
-- | Partial: the text of a string token (no match for other tokens).
getString (String s) = s
| FranklinChen/Ebnf2ps | src/Lexer.hs | bsd-3-clause | 2,807 | 4 | 10 | 559 | 1,254 | 638 | 616 | 62 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module Lib
( someFunc
) where
-- | Print a fixed greeting to stdout.
someFunc :: IO ()
someFunc = putStrLn greeting
  where
    greeting = "Hello, world!"
| tetigi/raider | src/Lib.hs | bsd-3-clause | 129 | 0 | 6 | 29 | 28 | 16 | 12 | 5 | 1 |
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE FunctionalDependencies #-}
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE FlexibleInstances #-}
module BitTorrent.Types where
import Control.Concurrent.MVar
import Control.Lens
import Data.Word
import Network (Socket)
import qualified Data.ByteString as B
-- | A raw SHA1 digest identifying a piece.
type Hash = B.ByteString
-- | One file entry of a multi-file torrent (length plus path).
data MetainfoFile = MetainfoFile
    { _mtfSize :: Integer
    , _mtfPath :: String
    }
makeFields ''MetainfoFile
-- | The decoded .torrent metainfo: tracker URL, info hash, piece data
-- and either a single size or a list of files.
data Metainfo = Metainfo
    { _mtAnnounce :: String
    , _mtInfoHash :: B.ByteString
    , _mtName :: B.ByteString
    , _mtPieceLen :: Integer
    , _mtPieces :: [Hash]
    , _mtSize :: Maybe Integer
    , _mtFiles :: Maybe [MetainfoFile]
    }
makeFields ''Metainfo
-- | Tracker announce event; 'Show' yields the wire-format keyword.
data Event = Started | Completed | Stopped | Empty
    deriving (Eq)
instance Show Event where
    show Started = "started"
    show Completed = "completed"
    show Stopped = "stopped"
    show Empty = "empty"
-- | Parameters of a tracker announce request.
data TrackerRequest = TrackerRequest
    { _reqAnnounce :: String
    , _reqInfoHash :: B.ByteString
    , _reqPeerId :: String
    , _reqIp :: String
    , _reqPort :: Int
    , _reqUploaded :: Int
    , _reqDownloaded :: Int
    , _reqLeft :: Int
    , _reqEvent :: Event
    , _reqCompact :: Int
    } deriving (Eq, Show)
makeFields ''TrackerRequest
-- | A peer as reported by the tracker (id optional in compact form;
-- address stored as raw 32-bit IP and 16-bit port).
data Peer = Peer
    { _peerId :: Maybe String
    , _peerIp :: Word32
    , _peerPort :: Word16
    } deriving (Eq, Show)
makeFields ''Peer
-- | Parsed tracker response: re-announce interval and the peer list.
data TrackerResponse = TrackerResponse
    { _resInterval :: Int
    , _resPeers :: [Peer]
    } deriving (Eq, Show)
makeFields ''TrackerResponse
-- | Per-piece download progress.
data PieceState = PieceDone | PieceStarted | PieceEmpty
-- | Mutable state shared while a torrent is being downloaded.
data Download = Download
    { _dlTrackerResponse :: MVar TrackerResponse
    }
makeFields ''Download
| kylc/torrent | src/BitTorrent/Types.hs | bsd-3-clause | 1,784 | 0 | 10 | 399 | 439 | 256 | 183 | 60 | 0 |
{-# LANGUAGE OverloadedStrings #-}
import Text.Blaze.Html (toHtml)
import Text.Blaze.Html5 (figure)
import Test.Hspec
import Text.Markdown
import Data.Text.Lazy (Text, unpack, snoc, fromStrict)
import qualified Data.Text as T
import qualified Data.Text.IO as TIO
import qualified Data.Text.Lazy as TL
import Text.Blaze.Html.Renderer.Text (renderHtml)
import Control.Monad (forM_)
import qualified Data.Set as Set
import qualified Data.Map as Map
import Data.List (isInfixOf, isSuffixOf)
import Data.Maybe (fromMaybe)
import System.Directory (getDirectoryContents)
import System.FilePath ((</>), replaceExtension)
import Block
import Inline
-- | Assert that rendering @input@ with default settings yields @expected@.
check :: Text -> Text -> Expectation
check expected input = renderHtml (markdown def input) `shouldBe` expected
-- | Like 'check' but with explicitly supplied markdown settings.
checkSet :: MarkdownSettings -> Text -> Text -> Expectation
checkSet settings expected input =
    renderHtml (markdown settings input) `shouldBe` expected
-- | Like 'check' but with XSS protection switched off.
check' :: Text -> Text -> Expectation
check' expected input =
    renderHtml (markdown def { msXssProtect = False } input) `shouldBe` expected
-- | Like 'check'' but the comparison ignores newline characters on
-- both sides.
checkNoNL :: Text -> Text -> Expectation
checkNoNL expected input =
    normalize (renderHtml $ markdown def { msXssProtect = False } input)
      `shouldBe` normalize expected
  where
    normalize = TL.filter (/= '\n')
-- FIXME add quickcheck: all input is valid
main :: IO ()
-- Load the on-disk example suites first, then run the whole hspec tree
-- of rendering expectations (grouped by markdown feature).
main = do
    examples <- getExamples
    gruber <- getGruber
    hspec $ do
        describe "block" blockSpecs
        describe "inline" inlineSpecs
        describe "line break" $ do
            it "is inserted for a single newline after two spaces"
                $ check "<p>Hello<br>World!</p>" "Hello  \nWorld!"
            it "is also inserted for a single CRLF after two spaces"
                $ check "<p>Hello<br>World!</p>" "Hello  \r\nWorld!"
            it "preserves quote nesting of the previous line"
                $ check "<blockquote><p>Q1<br>Q2</p></blockquote><p>P2</p>"
                        "> Q1  \nQ2\n\nP2"
            it "consumes all trailing whitespace on the previous line"
                $ check "<p>Hello<br>World!</p>" "Hello   \nWorld!"
        describe "paragraphs" $ do
            it "simple"
                $ check "<p>Hello World!</p>" "Hello World!"
            it "multiline"
                $ check "<p>Hello\nWorld!</p>" "Hello\nWorld!"
            it "multiple"
                $ check "<p>Hello</p><p>World!</p>" "Hello\n\nWorld!"
        describe "italics" $ do
            it "simple"
                $ check "<p><i>foo</i></p>" "*foo*"
            it "hanging"
                $ check "<p><i>foo</i> *</p>" "*foo* *"
            it "two"
                $ check "<p><i>foo</i> <i>bar</i></p>" "*foo* *bar*"
        describe "italics under" $ do
            it "simple"
                $ check "<p><i>foo</i></p>" "_foo_"
            it "hanging"
                $ check "<p><i>foo</i> _</p>" "_foo_ _"
            it "two"
                $ check "<p><i>foo</i> <i>bar</i></p>" "_foo_ _bar_"
        describe "bold" $ do
            it "simple"
                $ check "<p><b>foo</b></p>" "**foo**"
            it "hanging"
                $ check "<p><b>foo</b> **</p>" "**foo** **"
            it "two"
                $ check "<p><b>foo</b> <b>bar</b></p>" "**foo** **bar**"
        describe "bold under" $ do
            it "simple"
                $ check "<p><b>foo</b></p>" "__foo__"
            it "hanging"
                $ check "<p><b>foo</b> __</p>" "__foo__ __"
            it "two"
                $ check "<p><b>foo</b> <b>bar</b></p>" "__foo__ __bar__"
        describe "html" $ do
            it "simple"
                $ check "<div>Hello</div>" "<div>Hello</div>"
            it "dangerous"
                $ check "<div>Hello</div>" "<div onclick='alert(foo)'>Hello</div>"
            it "dangerous and allowed"
                $ check' "<div onclick='alert(foo)'>Hello</div>" "<div onclick='alert(foo)'>Hello</div>"

            let ml = "<div>foo\nbar\nbaz</div>"
            it "multiline" $ check ml ml

            let close = "<div>foo\nbar\nbaz"
            it "autoclose" $ check ml close

            let close2 = "<div>foo\nbar\nbaz\n\nparagraph"
            it "autoclose 2"
                $ check "<div>foo\nbar\nbaz<p>paragraph</p></div>" close2
        describe "inline code" $ do
            it "simple"
                $ check "<p>foo <code>bar</code> baz</p>" "foo `bar` baz"
        describe "code block" $ do
            it "simple"
                $ check
                    "<pre><code>foo\n bar\nbaz</code></pre>"
                    "    foo\n     bar\n    baz"
            it "custom renderer"
                $ checkSet
                    def { msBlockCodeRenderer = (\_ (u,_) -> figure (toHtml u)) }
                    "<figure>foo\n bar\nbaz</figure>"
                    "```haskell\nfoo\n bar\nbaz\n```"
        describe "escaping" $ do
            it "everything"
                $ check
                    "<p>*foo_bar<i>baz</i>\\`bin</p>"
                    "\\*foo\\_bar_baz_\\\\\\`bin"
        describe "bullets" $ do
            it "simple"
                $ check
                    "<ul><li>foo</li><li>bar</li><li>baz</li></ul>"
                    "* foo\n* bar\n* baz\n"
        describe "numbers" $ do
            it "simple"
                $ check
                    "<ol><li>foo</li><li>bar</li><li>baz</li></ol>"
                    "5. foo\n2. bar\n1. baz\n"
        describe "headings" $ do
            it "hashes"
                $ check
                    "<h1>foo</h1><h2>bar</h2><h3>baz</h3>"
                    "# foo\n\n## bar\n\n###baz"
            it "trailing hashes"
                $ check
                    "<h1>foo</h1>"
                    "# foo ####"
            it "underline"
                $ check
                    "<h1>foo</h1><h2>bar</h2>"
                    "foo\n=============\n\nbar\n----------------\n"
        describe "headings with ID" $ do
            let withHeadingId = def { msAddHeadingId = True }
            it "without spaces"
                $ checkSet withHeadingId
                    "<h1 id=\"foo\">foo</h1><h2 id=\"bar\">bar</h2><h3 id=\"baz\">baz</h3>"
                    "# foo\n\n## bar\n\n###baz"
            it "with spaces"
                $ checkSet withHeadingId
                    "<h1 id=\"executive-summary\">Executive summary</h1>"
                    "# Executive summary"
            it "with special characters"
                $ checkSet withHeadingId
                    "<h1 id=\"executive-summary-.-_:\">Executive summary .!@#$%^*()-_=:</h1>"
                    "# Executive summary .!@#$%^*()-_=:"
        describe "blockquotes" $ do
            it "simple"
                $ check
                    "<blockquote><p>foo</p><pre><code>bar</code></pre></blockquote>"
                    "> foo\n>\n>     bar"
        describe "links" $ do
            it "simple" $ check "<p><a href=\"foo\">bar</a></p>" "[bar](foo)"
            it "title" $ check
                "<p><a href=\"foo\" title=\"baz\">bar</a></p>"
                "[bar](foo \"baz\")"
            it "escaped href" $ check
                "<p><a href=\"foo)\" title=\"baz\">bar</a></p>"
                "[bar](foo\\) \"baz\")"
            it "escaped title" $ check
                "<p><a href=\"foo)\" title=\"baz&quot;\">bar</a></p>"
                "[bar](foo\\) \"baz\\\"\")"
            it "inside a paragraph" $ check
                "<p>Hello <a href=\"foo\">bar</a> World</p>"
                "Hello [bar](foo) World"
            it "not a link" $ check
                "<p>Not a [ link</p>"
                "Not a [ link"
            it "new tab" $ checkSet def { msLinkNewTab = True }
                "<p><a href=\"foo\" target=\"_blank\">bar</a></p>"
                "[bar](foo)"
        {-
        describe "github links" $ do
            it "simple" $ check "<p><a href=\"foo\">bar</a></p>" "[[bar|foo]]"
            it "no link text" $ check "<p><a href=\"foo\">foo</a></p>" "[[foo]]"
            it "escaping" $ check "<p><a href=\"foo-baz-bin\">bar</a></p>" "[[bar|foo/baz bin]]"
            it "inside a list" $ check "<ul><li><a href=\"foo\">foo</a></li></ul>" "* [[foo]]"
        -}
        describe "images" $ do
            it "simple" $ check
                "<p><img src=\"http://link.to/image.jpg\" alt=\"foo\"></p>"
                ""
            it "title" $ check
                "<p><img src=\"http://link.to/image.jpg\" alt=\"foo\" title=\"bar\"></p>"
                ""
            it "inside a paragraph" $ check
                "<p>Hello <img src=\"http://link.to/image.jpg\" alt=\"foo\"> World</p>"
                "Hello  World"
            it "not an image" $ check
                "<p>Not an ![ image</p>"
                "Not an ![ image"
        describe "rules" $ do
            -- each rule syntax is exercised both with and without a
            -- trailing newline
            let options = concatMap (\t -> [t, snoc t '\n'])
                    [ "* * *"
                    , "***"
                    , "*****"
                    , "- - -"
                    , "---------------------------------------"
                    , "----------------------------------"
                    ]
            forM_ options $ \o -> it (unpack o) $ check "<hr>" o
        describe "html" $ do
            it "inline" $ check "<p>foo<br>bar</p>" "foo<br>bar"
            it "inline xss" $ check "<p>foo<br>bar</p>" "foo<br onclick='evil'>bar"
            it "block" $ check "<div>hello world</div>" "<div>hello world</div>"
            it "block xss" $ check "alert('evil')" "<script>alert('evil')</script>"
            it "should be escaped" $ check "<p>1 &lt; 2</p>" "1 < 2"
            it "standalone" $ checkSet
                def { msStandaloneHtml = Set.fromList ["<hidden>", "</hidden>"], msXssProtect = False }
                "<hidden><pre><code class=\"haskell\">foo\nbar</code></pre></hidden>"
                "<hidden>\n```haskell\nfoo\nbar\n```\n</hidden>\n"
        describe "fencing" $ do
            it "custom fencing" $ checkSet
                def
                    { msFencedHandlers = Map.union
                        (htmlFencedHandler "@@@" (\clazz -> T.concat ["<article class=\"", clazz, "\">"]) (const "</article>"))
                        (msFencedHandlers def)
                    }
                "<article class=\"someclass\"><p>foo</p><blockquote><p>bar</p></blockquote></article>"
                "@@@ someclass\nfoo\n\n> bar\n@@@"
        describe "footnotes" $ do
            it "inline" $
              check "<p><a href=\"#footnote-1\" id=\"ref-1\">[1]</a>hello</p>"
                    "{1}hello"
            it "references" $
              check "<p><a href=\"#ref-1\" id=\"footnote-1\">[1]</a>hello</p>"
                    "{^1}hello"
        describe "examples" $ sequence_ examples
        describe "John Gruber's test suite" $ sequence_ gruber
        it "comments without spaces #22" $
            check "<!--<>-->" "<!--<>-->"
-- | Build one spec per @.md@ file in test/examples, each comparing the
-- rendered markdown against the matching @.html@ file. Files whose name
-- contains @-spec@ are rendered without XSS protection and only have a
-- final newline stripped; all others are whitespace-trimmed.
getExamples :: IO [Spec]
getExamples = do
    entries <- getDirectoryContents exampleDir
    mapM mkSpec (filter (".md" `isSuffixOf`) entries)
  where
    exampleDir = "test/examples"
    mkSpec name = do
        let path = exampleDir </> name
        source <- TIO.readFile path
        expected <- TIO.readFile (replaceExtension path "html")
        let (checker, strip')
                | "-spec" `isInfixOf` path = (check', dropTrailingNewline)
                | otherwise                = (check, T.strip)
        return $ it name $ checker (fromStrict $ strip' expected) (fromStrict source)
    dropTrailingNewline t = fromMaybe t (T.stripSuffix "\n" t)
-- | Build one spec per @.text@ file of John Gruber's original markdown
-- suite (test/Tests), comparing against the matching @.html@ file
-- modulo newlines.
getGruber :: IO [Spec]
getGruber = do
    entries <- getDirectoryContents suiteDir
    mapM mkSpec (filter (".text" `isSuffixOf`) entries)
  where
    suiteDir = "test/Tests"
    mkSpec name = do
        let path = suiteDir </> name
        source <- TIO.readFile path
        expected <- TIO.readFile (replaceExtension path "html")
        return $ it name $ checkNoNL (fromStrict $ T.strip expected) (fromStrict source)
| thefalconfeat/markdown | test/main.hs | bsd-3-clause | 11,129 | 0 | 23 | 3,397 | 2,175 | 1,012 | 1,163 | 250 | 1 |
{-# LANGUAGE TypeSynonymInstances #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE InstanceSigs #-}
module PFDS.Commons.BinaryRandomAccessList where
import PFDS.Commons.RandomAccessList
import Prelude hiding (head, tail, lookup)
-- | Complete binary leaf trees; 'Node' caches its element count.
data Tree a = Leaf a | Node Int (Tree a) (Tree a) deriving (Show, Eq)
-- | One binary digit: an absent ('Zero') or present ('One') tree.
data Digit a = Zero | One (Tree a) deriving (Show, Eq)
-- | A random-access list represented as a binary number, least
-- significant digit first: digit i, when 'One', holds a complete tree
-- of 2^i elements.
type RList a = [Digit a]
type instance Elem (RList a) = a
-- Okasaki's binary random-access list: cons/head/tail behave like binary
-- increment/decrement; lookup/update walk the digits then descend one tree.
instance RandomAccessList (RList a) where
  empty :: RList a
  empty = []

  isEmpty :: RList a -> Bool
  isEmpty [] = True
  isEmpty _ = False

  cons :: a -> RList a -> RList a
  cons x ts = consTree (Leaf x) ts

  -- Partial: 'unconsTree' raises "Empty" on an empty list.
  head :: RList a -> a
  head ts = let (Leaf x, _) = unconsTree ts in x

  tail :: RList a -> RList a
  tail ts = let (_, ts') = unconsTree ts in ts'

  -- Skip digits, subtracting each passed tree's size from the index,
  -- until the index falls inside a tree.
  lookup :: Int -> RList a -> a
  lookup i [] = error "Subscript"
  lookup i (Zero : ts) = lookup i ts
  lookup i (One t : ts) = if i < size t
    then lookupTree i t
    else lookup (i - size t) ts

  update :: Int -> a -> RList a -> RList a
  update i y [] = error "Subscript"
  update i y (Zero : ts) = Zero : update i y ts
  update i y (One t : ts) = if i < size t
    then One (updateTree i y t) : ts
    else One t : update (i - size t) y ts
-- helper functions
-- | Number of elements in a tree; 'Node's carry it precomputed.
size :: Tree a -> Int
size t = case t of
  Leaf _     -> 1
  Node w _ _ -> w
-- | Join two trees under a new node whose cached size is their sum.
link :: Tree a -> Tree a -> Tree a
link l r = Node (size l + size r) l r
-- | Insert a tree as the least-significant digit, carrying like binary
-- increment: a collision with an existing tree links the two and
-- recurses on the next digit.
consTree :: Tree a -> RList a -> RList a
consTree t []            = [One t]
consTree t (Zero : ds)   = One t : ds
consTree t (One t' : ds) = Zero : consTree (link t t') ds
-- | Split off the least-significant tree, borrowing like binary
-- decrement; a final lone 'One' leaves no trailing 'Zero'.
unconsTree :: RList a -> (Tree a, RList a)
unconsTree ds = case ds of
  []          -> error "Empty"
  [One t]     -> (t, [])
  One t : ds' -> (t, Zero : ds')
  Zero : ds'  ->
    let (Node _ l r, ds'') = unconsTree ds'
    in (l, One r : ds'')
-- | Index into a complete tree by repeatedly descending into the half
-- containing the index. Out-of-range indices hit a leaf and fail.
lookupTree :: Int -> Tree a -> a
lookupTree 0 (Leaf x) = x
lookupTree _ (Leaf _) = error "Subscript"
lookupTree i (Node w l r)
  | i < half  = lookupTree i l
  | otherwise = lookupTree (i - half) r
  where half = w `div` 2
-- | Replace the element at the given index, rebuilding only the path
-- down to it. Out-of-range indices hit a leaf and fail.
updateTree :: Int -> a -> Tree a -> Tree a
updateTree 0 y (Leaf _) = Leaf y
updateTree _ _ (Leaf _) = error "Subscript"
updateTree i y (Node w l r)
  | i < half  = Node w (updateTree i y l) r
  | otherwise = Node w l (updateTree (i - half) y r)
  where half = w `div` 2
| matonix/pfds | src/PFDS/Commons/BinaryRandomAccessList.hs | bsd-3-clause | 2,374 | 0 | 11 | 662 | 1,181 | 600 | 581 | 63 | 2 |
module Lib where
import System.Random
import Data.List
-- | Pick up to @n@ elements of @xs@ at distinct random positions.
-- Returns fewer than @n@ elements when @xs@ has fewer than @n@ entries,
-- and the empty list for empty input.
randList :: Int -> [ a ] -> IO [ a ]
randList _ [] = return []  -- avoids the ill-formed range (0, -1) below
randList n xs = do
  gen <- newStdGen
  -- Cap the request at the list length: 'nub' applied to the infinite
  -- 'randomRs' stream can only ever yield 'length xs' distinct indices,
  -- so asking 'take' for more than that would loop forever.
  let limit = min n (length xs)
      is = take limit . nub $ randomRs (0, length xs - 1) gen :: [ Int ]
  return (map (xs !!) is)
| ricardomiranda/haskellGeneticMagicSquare | src/Lib.hs | mit | 236 | 0 | 14 | 63 | 123 | 64 | 59 | 8 | 1 |
module Fields where
import Data.Aeson (FromJSON, ToJSON)
import GHC.Generics (Generic)
import Database.Persist.TH (derivePersistField)
-- | Mobile operating system of a client device.
data OS = IOS | Android deriving (Eq, Show, Read, Generic)
-- Persist as a string column via the Show/Read instances.
derivePersistField "OS"
-- JSON encoding comes from the Generic instance defaults.
instance FromJSON OS
instance ToJSON OS
| vyorkin-archive/assignment | api/src/Fields.hs | mit | 261 | 0 | 6 | 35 | 87 | 48 | 39 | -1 | -1 |
module FbVarBind where
f x = if x then 1 else x
| roberth/uu-helium | test/typeerrors/Examples/FbVarBind.hs | gpl-3.0 | 49 | 0 | 5 | 13 | 20 | 12 | 8 | 2 | 2 |
-- | The all-important theming engine!
--
-- Cf
-- https://hackage.haskell.org/package/vty/docs/Graphics-Vty-Attributes.html
-- http://hackage.haskell.org/package/brick/docs/Brick-AttrMap.html
-- http://hackage.haskell.org/package/brick-0.1/docs/Brick-Util.html
-- http://hackage.haskell.org/package/brick-0.1/docs/Brick-Widgets-Core.html#g:5
-- http://hackage.haskell.org/package/brick-0.1/docs/Brick-Widgets-Border.html
{-# LANGUAGE OverloadedStrings #-}
module Hledger.UI.Theme (
defaultTheme
,getTheme
,themes
,themeNames
) where
import qualified Data.Map as M
import Data.Maybe
import Data.Monoid
import Graphics.Vty
import Brick
import Brick.Widgets.Border
import Brick.Widgets.List
-- | The theme used when none is specified.
defaultTheme :: AttrMap
defaultTheme = fromMaybe (snd $ head themesList) $ getTheme "default"
  -- "default" is a theme that actually exists in themesList; the old
  -- lookup key "white" did not, so the lookup always fell through to
  -- the fallback. The fallback (first theme in the list) is kept for
  -- safety; themesList must be non-empty.
-- | Find the theme with the given name, if one is defined.
getTheme :: String -> Maybe AttrMap
getTheme = (`M.lookup` themes)
-- | A selection of named themes specifying terminal colours and styles.
-- One of these is active at a time.
--
-- A hledger-ui theme is a vty/brick AttrMap. Each theme specifies a
-- default style (Attr), plus extra styles which are applied when
-- their (hierarchical) name matches the widget rendering context.
-- "More specific styles, if present, are used and only fall back to
-- more general ones when the more specific ones are absent, but also
-- these styles get merged, so that if a more specific style only
-- provides the foreground color, its more general parent style can
-- set the background color, too."
-- For example: rendering a widget named "b" inside a widget named "a",
-- - if a style named "a" <> "b" exists, it will be used. Anything it
-- does not specify will be taken from a style named "a" if that
-- exists, otherwise from the default style.
-- - otherwise if a style named "a" exists, it will be used, and
-- anything it does not specify will be taken from the default style.
-- - otherwise (you guessed it) the default style is used.
--
-- | All built-in themes, keyed by name.
themes :: M.Map String AttrMap
themes = M.fromList themesList

-- | The names of all built-in themes.
themeNames :: [String]
themeNames = [name | (name, _) <- themesList]

-- Local shorthand for combining a vty attribute with a style.
(&) = withStyle
-- The first entry doubles as the fallback for 'defaultTheme', so this
-- list must stay non-empty.
themesList :: [(String, AttrMap)]
themesList = [
  ("default", attrMap
   (black `on` white & bold) [  -- default style for this theme
    ("error", currentAttr `withForeColor` red),
    (borderAttr       , white `on` black & dim),
    (borderAttr <> "bold", white `on` black & bold),
    (borderAttr <> "query", cyan `on` black & bold),
    (borderAttr <> "depth", yellow `on` black & bold),
    (borderAttr <> "keys", white `on` black & bold),
    (borderAttr <> "minibuffer", white `on` black & bold),
    -- ("normal"         , black `on` white),
    ("list"           , black `on` white),      -- regular list items
    ("list" <> "selected" , white `on` blue & bold)  -- selected list items
    -- ("list" <> "selected" , black `on` brightYellow),
    -- ("list" <> "accounts" , white `on` brightGreen),
    ,("list" <> "amount" <> "increase", currentAttr `withForeColor` green),
    ("list" <> "amount" <> "decrease", currentAttr `withForeColor` red),
    ("list" <> "balance" <> "positive", currentAttr `withForeColor` black),
    ("list" <> "balance" <> "negative", currentAttr `withForeColor` red),
    ("list" <> "amount" <> "increase" <> "selected", brightGreen `on` blue & bold),
    ("list" <> "amount" <> "decrease" <> "selected", brightRed `on` blue & bold),
    ("list" <> "balance" <> "positive" <> "selected", white `on` blue & bold),
    ("list" <> "balance" <> "negative" <> "selected", brightRed `on` blue & bold)
    ]),

  ("terminal", attrMap
   defAttr [  -- use the current terminal's default style
    (borderAttr       , white `on` black),
    -- ("normal"         , defAttr),
    (listAttr         , defAttr),
    (listSelectedAttr , defAttr & reverseVideo & bold)
    -- ("status"         , defAttr & reverseVideo)
    ]),

  ("greenterm", attrMap
   (green `on` black) [
    -- (listAttr                  , green `on` black),
    (listSelectedAttr          , black `on` green & bold)
    ])
  -- ("colorful", attrMap
  --  defAttr [
  --   (listAttr         , defAttr & reverseVideo),
  --   (listSelectedAttr , defAttr `withForeColor` white `withBackColor` red)
  --   -- ("status"         , defAttr `withForeColor` black `withBackColor` green)
  --   ])
  ]
-- halfbrightattr = defAttr & dim
-- reverseattr = defAttr & reverseVideo
-- redattr = defAttr `withForeColor` red
-- greenattr = defAttr `withForeColor` green
-- reverseredattr = defAttr & reverseVideo `withForeColor` red
-- reversegreenattr= defAttr & reverseVideo `withForeColor` green
| mstksg/hledger | hledger-ui/Hledger/UI/Theme.hs | gpl-3.0 | 5,139 | 0 | 12 | 1,349 | 763 | 479 | 284 | 51 | 1 |
import CpiLib
import CpiTest
import CpiODE
import CpiSemantics
import CpiLogic
import System.Environment (getArgs)
-- Time points
--tps = (100,(0,25))
-- Basic
-- Atomic formulas over species concentrations.
f1 = Pos (0,infty) (ValGT (Conc (Def "P" [])) (R 0.05))
f2 = Pos (0,infty) (ValLE (Conc (Def "S" ["s"])) (R 0.01))
f3 = Nec (0,infty) (ValGT (Conc (Def "E" ["e"])) (R 0.01))
f4 = Nec (0,infty) (ValGT (Conc (Def "E" ["e"])) (R 0.4))
-- 1-nested TL
-- NOTE(review): f5/f6 are identical, as are f7/f8 — f6 and f8 were
-- perhaps meant to wrap f2; confirm against the intended benchmark set.
f5 = Nec (0,infty) f1
f6 = Nec (0,infty) f1
f7 = Pos (0,infty) f1
f8 = Pos (0,infty) f1
-- 2-nested TL
f9 = Pos (0,infty) f5
f10 = Pos (0,infty) f6
f11 = Nec (0,infty) f7
f12 = Nec (0,infty) f8
-- 3-nested TL
f13 = Nec (0,infty) f9
f14 = Nec (0,infty) f10
f15 = Pos (0,infty) f11
f16 = Pos (0,infty) f12
-- Basic Gtee (behaviour under the "In" context)
f17 = Gtee "In" f1
f18 = Gtee "In" f2
f19 = Gtee "In" f3
f20 = Gtee "In" f4
-- 1-nested Gtee
f21 = Pos (0,infty) f17
f22 = Pos (0,infty) f18
f23 = Pos (0,infty) f19
f24 = Pos (0,infty) f20
f25 = Nec (0,infty) f17
f26 = Nec (0,infty) f18
f27 = Nec (0,infty) f19
f28 = Nec (0,infty) f20
-- | Benchmark driver: loads the CPi model @models/testGT.cpi@, builds an
-- ODE solution trace for process \"Pi\", then model-checks the guarantee
-- formulae f17..f28 with four strategies (naive, dynamic programming,
-- hybrid, hybrid2).  Each strategy call carries an SCC pragma so that
-- cost-centre profiles can be compared per formula; the repetition is
-- deliberate -- factoring it into a helper would merge the cost centres.
--
-- Usage: @profileMC2 \<number-of-time-points\>@ (interval fixed at (0,25)).
-- NOTE(review): @read (res!!0)@ is partial -- the program crashes with an
-- unhelpful error if the argument is missing or not numeric.
-- (The original also bound @odes = xdot env dpdt@ and
-- @inits = initials env pi' dpdt@; both were unused and were removed.)
main :: IO ()
main = do env <- tEnv "models/testGT.cpi"
          res <- getArgs
          let tps = (read(res!!0),(0,25))
          let pi = tProc env "Pi"
              mts = processMTS env pi
              pi' = wholeProc env pi mts
              dpdt = dPdt' env mts pi'
              ts = timePoints (read(res!!0)) (0,25)
              soln = solveODE env pi' dpdt tps
              ss = speciesIn env dpdt
              trace = timeSeries ts soln ss
          let r1 = {-# SCC "f17-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f17
              r2 = {-# SCC "f17-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f17
              r3 = {-# SCC "f17-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f17
              r4 = {-# SCC "f17-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f17
          print $ pretty f17
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f18-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f18
              r2 = {-# SCC "f18-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f18
              r3 = {-# SCC "f18-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f18
              r4 = {-# SCC "f18-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f18
          print $ pretty f18
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f19-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f19
              r2 = {-# SCC "f19-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f19
              r3 = {-# SCC "f19-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f19
              r4 = {-# SCC "f19-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f19
          print $ pretty f19
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f20-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f20
              r2 = {-# SCC "f20-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f20
              r3 = {-# SCC "f20-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f20
              r4 = {-# SCC "f20-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f20
          print $ pretty f20
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f21-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f21
              r2 = {-# SCC "f21-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f21
              r3 = {-# SCC "f21-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f21
              r4 = {-# SCC "f21-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f21
          print $ pretty f21
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f22-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f22
              r2 = {-# SCC "f22-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f22
              r3 = {-# SCC "f22-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f22
              r4 = {-# SCC "f22-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f22
          print $ pretty f22
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f23-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f23
              r2 = {-# SCC "f23-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f23
              r3 = {-# SCC "f23-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f23
              r4 = {-# SCC "f23-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f23
          print $ pretty f23
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f24-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f24
              r2 = {-# SCC "f24-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f24
              r3 = {-# SCC "f24-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f24
              r4 = {-# SCC "f24-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f24
          print $ pretty f24
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f25-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f25
              r2 = {-# SCC "f25-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f25
              r3 = {-# SCC "f25-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f25
              r4 = {-# SCC "f25-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f25
          print $ pretty f25
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f26-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f26
              r2 = {-# SCC "f26-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f26
              r3 = {-# SCC "f26-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f26
              r4 = {-# SCC "f26-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f26
          print $ pretty f26
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f27-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f27
              r2 = {-# SCC "f27-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f27
              r3 = {-# SCC "f27-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f27
              r4 = {-# SCC "f27-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f27
          print $ pretty f27
          print r1
          print r2
          print r3
          print r4
          let r1 = {-# SCC "f28-Naive" #-} modelCheck
                   env solveODE (Just trace) pi tps f28
              r2 = {-# SCC "f28-DynProg" #-} modelCheckDP
                   env solveODE (Just trace) pi tps f28
              r3 = {-# SCC "f28-Hybrid" #-} modelCheckHy
                   env solveODE (Just trace) pi tps f28
              r4 = {-# SCC "f28-Hybrid2" #-} modelCheckHy2
                   env solveODE (Just trace) pi tps f28
          print $ pretty f28
          print r1
          print r2
          print r3
          print r4
| chrisbanks/cpiwb | profileMC2.hs | gpl-3.0 | 8,285 | 0 | 14 | 3,461 | 2,425 | 1,198 | 1,227 | 203 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- Module : Network.AWS.StorageGateway.DescribeCache
-- Copyright : (c) 2013-2014 Brendan Hay <[email protected]>
-- License : This Source Code Form is subject to the terms of
-- the Mozilla Public License, v. 2.0.
-- A copy of the MPL can be found in the LICENSE file or
-- you can obtain it at http://mozilla.org/MPL/2.0/.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : experimental
-- Portability : non-portable (GHC extensions)
--
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- | This operation returns information about the cache of a gateway. This
-- operation is supported only for the gateway-cached volume architecture.
--
-- The response includes disk IDs that are configured as cache, and it
-- includes the amount of cache allocated and used.
--
-- <http://docs.aws.amazon.com/storagegateway/latest/APIReference/API_DescribeCache.html>
module Network.AWS.StorageGateway.DescribeCache
(
-- * Request
DescribeCache
-- ** Request constructor
, describeCache
-- ** Request lenses
, dcGatewayARN
-- * Response
, DescribeCacheResponse
-- ** Response constructor
, describeCacheResponse
-- ** Response lenses
, dcrCacheAllocatedInBytes
, dcrCacheDirtyPercentage
, dcrCacheHitPercentage
, dcrCacheMissPercentage
, dcrCacheUsedPercentage
, dcrDiskIds
, dcrGatewayARN
) where
import Network.AWS.Data (Object)
import Network.AWS.Prelude
import Network.AWS.Request.JSON
import Network.AWS.StorageGateway.Types
import qualified GHC.Exts
-- | Request for the DescribeCache operation; wraps the Amazon Resource
-- Name (ARN) of the gateway to query.
newtype DescribeCache = DescribeCache
    { _dcGatewayARN :: Text
    } deriving (Eq, Ord, Read, Show, Monoid, IsString)
-- | Build a 'DescribeCache' request from a gateway ARN.
--
-- The field is accessible through the corresponding lens:
--
-- * 'dcGatewayARN' @::@ 'Text'
describeCache :: Text -- ^ 'dcGatewayARN'
              -> DescribeCache
describeCache arn = DescribeCache { _dcGatewayARN = arn }
-- | Lens onto the gateway ARN carried by a 'DescribeCache' request.
dcGatewayARN :: Lens' DescribeCache Text
dcGatewayARN = lens _dcGatewayARN setter
  where setter rq v = rq { _dcGatewayARN = v }
-- | Response of the DescribeCache operation: cache sizing/usage figures
-- and the disk IDs configured as cache.  All fields are optional in the
-- wire format (see the 'FromJSON' instance below).
data DescribeCacheResponse = DescribeCacheResponse
    { _dcrCacheAllocatedInBytes :: Maybe Integer
    , _dcrCacheDirtyPercentage  :: Maybe Double
    , _dcrCacheHitPercentage    :: Maybe Double
    , _dcrCacheMissPercentage   :: Maybe Double
    , _dcrCacheUsedPercentage   :: Maybe Double
    , _dcrDiskIds               :: List "DiskIds" Text
    , _dcrGatewayARN            :: Maybe Text
    } deriving (Eq, Ord, Read, Show)
-- | Build an empty 'DescribeCacheResponse': every 'Maybe' field is
-- 'Nothing' and the disk-ID list is empty.
--
-- The fields accessible through corresponding lenses are:
--
-- * 'dcrCacheAllocatedInBytes' @::@ 'Maybe' 'Integer'
--
-- * 'dcrCacheDirtyPercentage' @::@ 'Maybe' 'Double'
--
-- * 'dcrCacheHitPercentage' @::@ 'Maybe' 'Double'
--
-- * 'dcrCacheMissPercentage' @::@ 'Maybe' 'Double'
--
-- * 'dcrCacheUsedPercentage' @::@ 'Maybe' 'Double'
--
-- * 'dcrDiskIds' @::@ ['Text']
--
-- * 'dcrGatewayARN' @::@ 'Maybe' 'Text'
describeCacheResponse :: DescribeCacheResponse
describeCacheResponse = DescribeCacheResponse
    { _dcrCacheAllocatedInBytes = Nothing
    , _dcrCacheDirtyPercentage  = Nothing
    , _dcrCacheHitPercentage    = Nothing
    , _dcrCacheMissPercentage   = Nothing
    , _dcrCacheUsedPercentage   = Nothing
    , _dcrDiskIds               = mempty
    , _dcrGatewayARN            = Nothing
    }
-- | Lens onto the @CacheAllocatedInBytes@ field.
dcrCacheAllocatedInBytes :: Lens' DescribeCacheResponse (Maybe Integer)
dcrCacheAllocatedInBytes = lens _dcrCacheAllocatedInBytes setter
  where setter rs v = rs { _dcrCacheAllocatedInBytes = v }

-- | Lens onto the @CacheDirtyPercentage@ field.
dcrCacheDirtyPercentage :: Lens' DescribeCacheResponse (Maybe Double)
dcrCacheDirtyPercentage = lens _dcrCacheDirtyPercentage setter
  where setter rs v = rs { _dcrCacheDirtyPercentage = v }

-- | Lens onto the @CacheHitPercentage@ field.
dcrCacheHitPercentage :: Lens' DescribeCacheResponse (Maybe Double)
dcrCacheHitPercentage = lens _dcrCacheHitPercentage setter
  where setter rs v = rs { _dcrCacheHitPercentage = v }

-- | Lens onto the @CacheMissPercentage@ field.
dcrCacheMissPercentage :: Lens' DescribeCacheResponse (Maybe Double)
dcrCacheMissPercentage = lens _dcrCacheMissPercentage setter
  where setter rs v = rs { _dcrCacheMissPercentage = v }

-- | Lens onto the @CacheUsedPercentage@ field.
dcrCacheUsedPercentage :: Lens' DescribeCacheResponse (Maybe Double)
dcrCacheUsedPercentage = lens _dcrCacheUsedPercentage setter
  where setter rs v = rs { _dcrCacheUsedPercentage = v }

-- | Lens onto the disk-ID list (unwrapped from its @List@ newtype via
-- @_List@).
dcrDiskIds :: Lens' DescribeCacheResponse [Text]
dcrDiskIds = lens _dcrDiskIds setter . _List
  where setter rs v = rs { _dcrDiskIds = v }

-- | Lens onto the @GatewayARN@ field.
dcrGatewayARN :: Lens' DescribeCacheResponse (Maybe Text)
dcrGatewayARN = lens _dcrGatewayARN setter
  where setter rs v = rs { _dcrGatewayARN = v }
-- DescribeCache is a JSON-RPC style call: the whole request travels in
-- the POST body, so the path is always "/" and the query string is empty.
instance ToPath DescribeCache where
    toPath = const "/"
instance ToQuery DescribeCache where
    toQuery = const mempty
instance ToHeaders DescribeCache
instance ToJSON DescribeCache where
    toJSON DescribeCache{..} = object
        [ "GatewayARN" .= _dcGatewayARN
        ]
-- Wires the request to the StorageGateway service; the reply body is
-- decoded as JSON into 'DescribeCacheResponse'.
instance AWSRequest DescribeCache where
    type Sv DescribeCache = StorageGateway
    type Rs DescribeCache = DescribeCacheResponse
    request  = post "DescribeCache"
    response = jsonResponse
-- The applicative chain must list the keys in exactly the order of the
-- 'DescribeCacheResponse' constructor fields; "DiskIds" falls back to
-- 'mempty' when the key is absent, all other fields parse to 'Nothing'.
instance FromJSON DescribeCacheResponse where
    parseJSON = withObject "DescribeCacheResponse" $ \o -> DescribeCacheResponse
        <$> o .:? "CacheAllocatedInBytes"
        <*> o .:? "CacheDirtyPercentage"
        <*> o .:? "CacheHitPercentage"
        <*> o .:? "CacheMissPercentage"
        <*> o .:? "CacheUsedPercentage"
        <*> o .:? "DiskIds" .!= mempty
        <*> o .:? "GatewayARN"
| romanb/amazonka | amazonka-storagegateway/gen/Network/AWS/StorageGateway/DescribeCache.hs | mpl-2.0 | 5,884 | 0 | 22 | 1,236 | 896 | 527 | 369 | 98 | 1 |
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- |
-- Module : Network.AWS.SWF.DeprecateWorkflowType
-- Copyright : (c) 2013-2015 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Deprecates the specified /workflow type/. After a workflow type has been
-- deprecated, you cannot create new executions of that type. Executions
-- that were started before the type was deprecated will continue to run. A
-- deprecated workflow type may still be used when calling visibility
-- actions.
--
-- This operation is eventually consistent. The results are best effort and
-- may not exactly reflect recent updates and changes.
--
-- __Access Control__
--
-- You can use IAM policies to control this action\'s access to Amazon SWF
-- resources as follows:
--
-- - Use a 'Resource' element with the domain name to limit the action to
-- only specified domains.
-- - Use an 'Action' element to allow or deny permission to call this
-- action.
-- - Constrain the following parameters by using a 'Condition' element
-- with the appropriate keys.
-- - 'workflowType.name': String constraint. The key is
-- 'swf:workflowType.name'.
-- - 'workflowType.version': String constraint. The key is
-- 'swf:workflowType.version'.
--
-- If the caller does not have sufficient permissions to invoke the action,
-- or the parameter values fall outside the specified constraints, the
-- action fails. The associated event attribute\'s __cause__ parameter will
-- be set to OPERATION_NOT_PERMITTED. For details and example IAM policies,
-- see
-- <http://docs.aws.amazon.com/amazonswf/latest/developerguide/swf-dev-iam.html Using IAM to Manage Access to Amazon SWF Workflows>.
--
-- /See:/ <http://docs.aws.amazon.com/amazonswf/latest/apireference/API_DeprecateWorkflowType.html AWS API Reference> for DeprecateWorkflowType.
module Network.AWS.SWF.DeprecateWorkflowType
(
-- * Creating a Request
deprecateWorkflowType
, DeprecateWorkflowType
-- * Request Lenses
, dDomain
, dWorkflowType
-- * Destructuring the Response
, deprecateWorkflowTypeResponse
, DeprecateWorkflowTypeResponse
) where
import Network.AWS.Prelude
import Network.AWS.Request
import Network.AWS.Response
import Network.AWS.SWF.Types
import Network.AWS.SWF.Types.Product
-- | /See:/ 'deprecateWorkflowType' smart constructor.
data DeprecateWorkflowType = DeprecateWorkflowType'
    { _dDomain       :: !Text         -- ^ domain holding the workflow type (see 'dDomain')
    , _dWorkflowType :: !WorkflowType -- ^ workflow type to deprecate (see 'dWorkflowType')
    } deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Smart constructor for 'DeprecateWorkflowType'.
--
-- Modify the two fields afterwards with:
--
-- * 'dDomain'
--
-- * 'dWorkflowType'
deprecateWorkflowType
    :: Text -- ^ 'dDomain'
    -> WorkflowType -- ^ 'dWorkflowType'
    -> DeprecateWorkflowType
deprecateWorkflowType domain wfType =
    DeprecateWorkflowType' { _dDomain = domain, _dWorkflowType = wfType }
-- | The name of the domain in which the workflow type is registered.
dDomain :: Lens' DeprecateWorkflowType Text
dDomain = lens _dDomain setter
  where setter rq v = rq { _dDomain = v }

-- | The workflow type to deprecate.
dWorkflowType :: Lens' DeprecateWorkflowType WorkflowType
dWorkflowType = lens _dWorkflowType setter
  where setter rq v = rq { _dWorkflowType = v }
-- Ties the request to the SWF service; the call returns an empty body,
-- which 'receiveNull' maps to 'DeprecateWorkflowTypeResponse''.
instance AWSRequest DeprecateWorkflowType where
        type Rs DeprecateWorkflowType =
             DeprecateWorkflowTypeResponse
        request = postJSON sWF
        response = receiveNull DeprecateWorkflowTypeResponse'
-- JSON-RPC style transport: the operation name rides in the
-- X-Amz-Target header, the payload is application/x-amz-json-1.0.
instance ToHeaders DeprecateWorkflowType where
        toHeaders
          = const
              (mconcat
                 ["X-Amz-Target" =#
                    ("SimpleWorkflowService.DeprecateWorkflowType" ::
                       ByteString),
                  "Content-Type" =#
                    ("application/x-amz-json-1.0" :: ByteString)])
instance ToJSON DeprecateWorkflowType where
        toJSON DeprecateWorkflowType'{..}
          = object
              (catMaybes
                 [Just ("domain" .= _dDomain),
                  Just ("workflowType" .= _dWorkflowType)])
-- Everything travels in the POST body: fixed path "/", empty query.
instance ToPath DeprecateWorkflowType where
        toPath = const "/"
instance ToQuery DeprecateWorkflowType where
        toQuery = const mempty
-- | /See:/ 'deprecateWorkflowTypeResponse' smart constructor.
-- The response carries no data; the operation only signals success or
-- failure at the protocol level (cf. 'receiveNull' above).
data DeprecateWorkflowTypeResponse =
    DeprecateWorkflowTypeResponse'
    deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Creates a value of 'DeprecateWorkflowTypeResponse' with the minimum fields required to make a request.
--
deprecateWorkflowTypeResponse
    :: DeprecateWorkflowTypeResponse
deprecateWorkflowTypeResponse = DeprecateWorkflowTypeResponse'
| fmapfmapfmap/amazonka | amazonka-swf/gen/Network/AWS/SWF/DeprecateWorkflowType.hs | mpl-2.0 | 5,287 | 0 | 12 | 1,094 | 508 | 315 | 193 | 71 | 1 |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE helpset PUBLIC "-//Sun Microsystems Inc.//DTD JavaHelp HelpSet Version 2.0//EN" "http://java.sun.com/products/javahelp/helpset_2_0.dtd">
<!-- JavaHelp helpset descriptor for the "Customizable HTML Report" help
     content (locale sr-SP): declares the map file plus the TOC, Index,
     Search and Favorites views. -->
<helpset version="2.0" xml:lang="sr-SP">
  <title>Customizable HTML Report</title>
  <maps>
    <homeID>top</homeID>
    <mapref location="map.jhm"/>
  </maps>
  <view>
    <name>TOC</name>
    <label>Contents</label>
    <type>org.zaproxy.zap.extension.help.ZapTocView</type>
    <data>toc.xml</data>
  </view>
  <view>
    <name>Index</name>
    <label>Index</label>
    <type>javax.help.IndexView</type>
    <data>index.xml</data>
  </view>
  <view>
    <name>Search</name>
    <label>Search</label>
    <type>javax.help.SearchView</type>
    <data engine="com.sun.java.help.search.DefaultSearchEngine">
      JavaHelpSearch
    </data>
  </view>
  <view>
    <name>Favorites</name>
    <label>Favorites</label>
    <type>javax.help.FavoritesView</type>
  </view>
</helpset> | veggiespam/zap-extensions | addOns/customreport/src/main/javahelp/org/zaproxy/zap/extension/customreport/resources/help_sr_SP/helpset_sr_SP.hs | apache-2.0 | 970 | 79 | 66 | 158 | 411 | 208 | 203 | -1 | -1 |
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE DataKinds #-}
module Haskus.System.Linux.Internals.IfLink where
import Haskus.Utils.Types.Generics (Generic)
import Haskus.Format.Binary.Storable
import Haskus.Format.Binary.Vector (Vector)
import Haskus.Format.Binary.Word
import Haskus.Format.Binary.BitSet
import Haskus.Format.Binary.Endianness
-- =============================================================
-- From linux/include/uapi/linux/if_link.h
-- =============================================================
-- | Device statistics with 32-bit counters.
-- This struct should be in sync with struct rtnl_link_stats64
data LinkStats = LinkStats
   { linkStatsRxPackets         :: !Word32 -- ^ total packets received
   , linkStatsTxPackets         :: !Word32 -- ^ total packets transmitted
   , linkStatsRxBytes           :: !Word32 -- ^ total bytes received
   , linkStatsTxBytes           :: !Word32 -- ^ total bytes transmitted
   , linkStatsRxErrors          :: !Word32 -- ^ bad packets received
   , linkStatsTxErrors          :: !Word32 -- ^ packet transmit problems
   , linkStatsRxDropped         :: !Word32 -- ^ no space in linux buffers
   , linkStatsTxDropped         :: !Word32 -- ^ no space available in linux
   , linkStatsMulticast         :: !Word32 -- ^ multicast packets received
   , linkStatsCollisions        :: !Word32
   -- detailed rx_errors:
   , linkStatsRxLengthErrors    :: !Word32
   , linkStatsRxOverErrors      :: !Word32 -- ^ receiver ring buff overflow
   , linkStatsRxCrcErrors       :: !Word32 -- ^ recved pkt with crc error
   , linkStatsRxFrameErrors     :: !Word32 -- ^ recv'd frame alignment error
   , linkStatsRxFifoErrors      :: !Word32 -- ^ recv'r fifo overrun
   , linkStatsRxMissedErrors    :: !Word32 -- ^ receiver missed packet
   -- detailed tx_errors
   , linkStatsTxAbortedErrors   :: !Word32
   , linkStatsTxCarrierErrors   :: !Word32
   , linkStatsTxFifoErrors      :: !Word32
   , linkStatsTxHeartbeatErrors :: !Word32
   , linkStatsTxWindowErrors    :: !Word32
   -- for cslip etc
   , linkStatsRxCompressed      :: !Word32
   , linkStatsTxCompressed      :: !Word32
   , linkStatsRxNohandler       :: !Word32 -- ^ dropped, no handler found
   }
   deriving (Generic,Storable,Show)
-- | The main device statistics structure
-- (64-bit variant of 'LinkStats'; same field layout and meanings).
data LinkStats64 = LinkStats64
   { linkStats64RxPackets         :: !Word64 -- ^ total packets received
   , linkStats64TxPackets         :: !Word64 -- ^ total packets transmitted
   , linkStats64RxBytes           :: !Word64 -- ^ total bytes received
   , linkStats64TxBytes           :: !Word64 -- ^ total bytes transmitted
   , linkStats64RxErrors          :: !Word64 -- ^ bad packets received
   , linkStats64TxErrors          :: !Word64 -- ^ packet transmit problems
   , linkStats64RxDropped         :: !Word64 -- ^ no space in linux buffers
   , linkStats64TxDropped         :: !Word64 -- ^ no space available in linux
   , linkStats64Multicast         :: !Word64 -- ^ multicast packets received
   , linkStats64Collisions        :: !Word64
   -- detailed rx_errors:
   , linkStats64RxLengthErrors    :: !Word64
   , linkStats64RxOverErrors      :: !Word64 -- ^ receiver ring buff overflow
   , linkStats64RxCrcErrors       :: !Word64 -- ^ recved pkt with crc error
   , linkStats64RxFrameErrors     :: !Word64 -- ^ recv'd frame alignment error
   , linkStats64RxFifoErrors      :: !Word64 -- ^ recv'r fifo overrun
   , linkStats64RxMissedErrors    :: !Word64 -- ^ receiver missed packet
   -- detailed tx_errors
   , linkStats64TxAbortedErrors   :: !Word64
   , linkStats64TxCarrierErrors   :: !Word64
   , linkStats64TxFifoErrors      :: !Word64
   , linkStats64TxHeartbeatErrors :: !Word64
   , linkStats64TxWindowErrors    :: !Word64
   -- for cslip etc
   , linkStats64RxCompressed      :: !Word64
   , linkStats64TxCompressed      :: !Word64
   , linkStats64RxNohandler       :: !Word64 -- ^ dropped, no handler found
   }
   deriving (Generic,Storable,Show)
-- | The struct should be in sync with struct ifmap
data LinkIfMap = LinkIfMap
   { linkIfMapMemStart :: !Word64
   , linkIfMapMemEnd   :: !Word64
   , linkIfMapBaseAddr :: !Word64
   , linkIfMapIRQ      :: !Word16
   , linkIfMapDMA      :: !Word8
   , linkIfMapPort     :: !Word8
   }
   deriving (Generic,Storable,Show)
--
-- IFLA_AF_SPEC
-- Contains nested attributes for address family specific attributes.
-- Each address family may create a attribute with the address family
-- number as type and create its own attribute structure in it.
--
-- Example:
-- [IFLA_AF_SPEC] = {
-- [AF_INET] = {
-- [IFLA_INET_CONF] = ...,
-- },
-- [AF_INET6] = {
-- [IFLA_INET6_FLAGS] = ...,
-- [IFLA_INET6_CONF] = ...,
-- }
-- }
--
-- | Link-level netlink attribute types (the @IFLA_*@ values from
-- @linux/if_link.h@).  The derived 'Enum' relies on declaration order --
-- keep it in sync with the kernel header.
data InterfaceFlag
   = IflaUNSPEC
   | IflaADDRESS
   | IflaBROADCAST
   | IflaIFNAME
   | IflaMTU
   | IflaLINK
   | IflaQDISC
   | IflaSTATS
   | IflaCOST
   | IflaPRIORITY
   | IflaMASTER
   | IflaWIRELESS        -- ^ Wireless Extension event - see wireless.h
   | IflaPROTINFO        -- ^ Protocol specific information for a link
   | IflaTXQLEN
   | IflaMAP
   | IflaWEIGHT
   | IflaOPERSTATE
   | IflaLINKMODE
   | IflaLINKINFO
   | IflaNET_NS_PID
   | IflaIFALIAS
   | IflaNUM_VF          -- ^ Number of VFs if device is SR-IOV PF
   | IflaVFINFO_LIST
   | IflaSTATS64
   | IflaVF_PORTS
   | IflaPORT_SELF
   | IflaAF_SPEC
   | IflaGROUP           -- ^ Group the device belongs to
   | IflaNET_NS_FD
   | IflaEXT_MASK        -- ^ Extended info mask, VFs, etc.
   | IflaPROMISCUITY     -- ^ Promiscuity count: > 0 means acts PROMISC
   | IflaNUM_TX_QUEUES
   | IflaNUM_RX_QUEUES
   | IflaCARRIER
   | IflaPHYS_PORT_ID
   | IflaCARRIER_CHANGES
   | IflaPHYS_SWITCH_ID
   | IflaLINK_NETNSID
   | IflaPHYS_PORT_NAME
   | IflaPROTO_DOWN
   | IflaGSO_MAX_SEGS
   | IflaGSO_MAX_SIZE
   | IflaPAD
   | IflaXDP
   deriving (Show,Eq,Enum)
-- | @IFLA_INET_*@ attribute types (cf. the AF_INET nesting example above).
data InterfaceFlagInet
   = IflaInetUnspec
   | IflaInetConf
   deriving (Show,Eq,Enum)
-- ifi_flags.
--
-- IFF_* flags.
--
-- The only change is:
-- IFF_LOOPBACK, IFF_BROADCAST and IFF_POINTOPOINT are
-- more not changeable by user. They describe link media
-- characteristics and set by device driver.
--
-- Comments:
-- - Combination IFF_BROADCAST|IFF_POINTOPOINT is invalid
-- - If neither of these three flags are set;
-- the interface is NBMA.
--
-- - IFF_MULTICAST does not mean anything special:
-- multicasts can be used on all not-NBMA links.
-- IFF_MULTICAST means that this media uses special encapsulation
-- for multicast frames. Apparently, all IFF_POINTOPOINT and
-- IFF_BROADCAST devices are able to use multicasts too.
--
-- IFLA_LINK.
-- For usual devices it is equal ifi_index.
-- If it is a "virtual interface" (f.e. tunnel), ifi_link
-- can point to real physical interface (f.e. for bandwidth calculations),
-- or maybe 0, what means, that real media is unknown (usual
-- for IPIP tunnels, when route to endpoint is allowed to change)
--
-- | Subtype attributes for IFLA_PROTINFO
data IflaInet6
   = IflaInet6UNSPEC
   | IflaInet6FLAGS         -- ^ link flags
   | IflaInet6CONF          -- ^ sysctl parameters
   | IflaInet6STATS         -- ^ statistics
   | IflaInet6MCAST         -- ^ MC things. What of them?
   | IflaInet6CACHEINFO     -- ^ time values and max reasm size
   | IflaInet6ICMP6STATS    -- ^ statistics (icmpv6)
   | IflaInet6TOKEN         -- ^ device token
   | IflaInet6ADDR_GEN_MODE -- ^ implicit address generator mode
   deriving (Show,Eq,Enum)
-- | IPv6 address-generation modes (cf. 'IflaInet6ADDR_GEN_MODE').
data In6AddrGenMode
   = In6AddrGenModeEUI64
   | In6AddrGenModeNone
   | In6AddrGenModeStablePrivacy
   | In6AddrGenModeRandom
   deriving (Show,Eq,Enum)
--------------------
-- Bridge section
--------------------
-- | Bridge device attributes (@IFLA_BR_*@); declaration order defines the
-- derived 'Enum' values.
data IflaBridge
   = IFLA_BR_UNSPEC
   | IFLA_BR_FORWARD_DELAY
   | IFLA_BR_HELLO_TIME
   | IFLA_BR_MAX_AGE
   | IFLA_BR_AGEING_TIME
   | IFLA_BR_STP_STATE
   | IFLA_BR_PRIORITY
   | IFLA_BR_VLAN_FILTERING
   | IFLA_BR_VLAN_PROTOCOL
   | IFLA_BR_GROUP_FWD_MASK
   | IFLA_BR_ROOT_ID
   | IFLA_BR_BRIDGE_ID
   | IFLA_BR_ROOT_PORT
   | IFLA_BR_ROOT_PATH_COST
   | IFLA_BR_TOPOLOGY_CHANGE
   | IFLA_BR_TOPOLOGY_CHANGE_DETECTED
   | IFLA_BR_HELLO_TIMER
   | IFLA_BR_TCN_TIMER
   | IFLA_BR_TOPOLOGY_CHANGE_TIMER
   | IFLA_BR_GC_TIMER
   | IFLA_BR_GROUP_ADDR
   | IFLA_BR_FDB_FLUSH
   | IFLA_BR_MCAST_ROUTER
   | IFLA_BR_MCAST_SNOOPING
   | IFLA_BR_MCAST_QUERY_USE_IFADDR
   | IFLA_BR_MCAST_QUERIER
   | IFLA_BR_MCAST_HASH_ELASTICITY
   | IFLA_BR_MCAST_HASH_MAX
   | IFLA_BR_MCAST_LAST_MEMBER_CNT
   | IFLA_BR_MCAST_STARTUP_QUERY_CNT
   | IFLA_BR_MCAST_LAST_MEMBER_INTVL
   | IFLA_BR_MCAST_MEMBERSHIP_INTVL
   | IFLA_BR_MCAST_QUERIER_INTVL
   | IFLA_BR_MCAST_QUERY_INTVL
   | IFLA_BR_MCAST_QUERY_RESPONSE_INTVL
   | IFLA_BR_MCAST_STARTUP_QUERY_INTVL
   | IFLA_BR_NF_CALL_IPTABLES
   | IFLA_BR_NF_CALL_IP6TABLES
   | IFLA_BR_NF_CALL_ARPTABLES
   | IFLA_BR_VLAN_DEFAULT_PVID
   | IFLA_BR_PAD
   | IFLA_BR_VLAN_STATS_ENABLED
   | IFLA_BR_MCAST_STATS_ENABLED
   | IFLA_BR_MCAST_IGMP_VERSION
   | IFLA_BR_MCAST_MLD_VERSION
   deriving (Show,Eq,Enum)
-- | Bridge identifier: a 2-byte priority followed by a 6-byte (ETH_ALEN)
-- hardware address.
data BridgeId = BridgeId
   { bridgePriority :: Vector 2 Word8
   , bridgeAddress  :: Vector 6 Word8 -- ETH_ALEN
   }
   deriving (Generic,Storable,Show)
-- | Bridge port mode values.
data BridgeMode
   = BridgeModeUnspec
   | BridgeModeHairpin
   deriving (Show,Eq,Enum)
-- | Bridge port attributes.
data BridgePort
   = BridgePortUNSPEC
   | BridgePortSTATE               -- ^ Spanning tree state
   | BridgePortPRIORITY            -- ^ Spanning tree priority
   | BridgePortCOST                -- ^ Spanning tree cost
   | BridgePortMODE                -- ^ mode (hairpin)
   | BridgePortGUARD               -- ^ bpdu guard
   | BridgePortPROTECT             -- ^ root port protection
   | BridgePortFAST_LEAVE          -- ^ multicast fast leave
   | BridgePortLEARNING            -- ^ mac learning
   | BridgePortUNICAST_FLOOD       -- ^ flood unicast traffic
   | BridgePortPROXYARP            -- ^ proxy ARP
   | BridgePortLEARNING_SYNC       -- ^ mac learning sync from device
   | BridgePortPROXYARP_WIFI       -- ^ proxy ARP for Wi-Fi
   | BridgePortROOT_ID             -- ^ designated root
   | BridgePortBRIDGE_ID           -- ^ designated bridge
   | BridgePortDESIGNATED_PORT
   | BridgePortDESIGNATED_COST
   | BridgePortID
   | BridgePortNO
   | BridgePortTOPOLOGY_CHANGE_ACK
   | BridgePortCONFIG_PENDING
   | BridgePortMESSAGE_AGE_TIMER
   | BridgePortFORWARD_DELAY_TIMER
   | BridgePortHOLD_TIMER
   | BridgePortFLUSH
   | BridgePortMULTICAST_ROUTER
   | BridgePortPAD
   | BridgePortMCAST_FLOOD
   | BridgePortMCAST_TO_UCAST
   | BridgePortVLAN_TUNNEL
   | BridgePortBCAST_FLOOD
   deriving (Show,Eq,Enum)
-- | Cache information record: max reassembly size and timing values.
-- NOTE(review): the field set matches the payload described for
-- 'IflaInet6CACHEINFO' ("time values and max reasm size") -- confirm
-- against @struct ifla_cacheinfo@ in @linux/if_link.h@.
--
-- Fields are now strict for consistency with the other Storable records
-- in this module ('LinkStats', 'LinkIfMap', 'VLANFlags', ...), which all
-- use strict fields.
data CacheInfo = CacheInfo
   { cacheInfoMaxReasmLen   :: !Word32
   , cacheInfoTimestamp     :: !Word32 -- ^ ipv6InterfaceTable updated timestamp
   , cacheInfoReachableTime :: !Word32
   , cacheInfoRetransTime   :: !Word32
   }
   deriving (Generic,Storable,Show)
-- | @IFLA_INFO_*@ link-info attribute types.
data InterfaceInfo
   = InterfaceInfoUNSPEC
   | InterfaceInfoKIND
   | InterfaceInfoDATA
   | InterfaceInfoXSTATS
   | InterfaceInfoSLAVE_KIND
   | InterfaceInfoSLAVE_DATA
   deriving (Show,Eq,Enum)
-----------------
-- VLAN section
-----------------
-- | VLAN link attribute types.
data VLAN
   = VLAN_UNSPEC
   | VLAN_ID
   | VLAN_FLAGS
   | VLAN_EGRESS_QOS
   | VLAN_INGRESS_QOS
   | VLAN_PROTOCOL
   deriving (Show,Eq,Enum)
-- | VLAN flag bits together with a mask word.
data VLANFlags = VLANFlags
   { vlanFlags :: !Word32
   , vlanMask  :: !Word32
   }
   deriving (Generic,Storable,Show)
-- | VLAN QoS attribute types.
data VLAN_QOS
   = VLAN_QOS_UNSPEC
   | VLAN_QOS_MAPPING
   deriving (Show,Eq,Enum)
-- | A single QoS mapping entry ('vlanQosMappingFrom' -> 'vlanQosMappingTo').
data VLAN_QOS_Mapping = VLAN_QOS_Mapping
   { vlanQosMappingFrom :: !Word32
   , vlanQosMappingTo   :: !Word32
   }
   deriving (Generic,Storable,Show)
--------------------
-- MACVLAN section
--------------------
-- | MACVLAN link attribute types.
data MACVLAN
   = MACVLAN_UNSPEC
   | MACVLAN_MODE
   | MACVLAN_FLAGS
   | MACVLAN_MACADDR_MODE
   | MACVLAN_MACADDR
   | MACVLAN_MACADDR_DATA
   | MACVLAN_MACADDR_COUNT
   deriving (Show,Eq,Enum)
-- | MACVLAN operating modes.
data MACVLAN_Mode
   = MACVLAN_MODE_PRIVATE  -- ^ don't talk to other macvlans
   | MACVLAN_MODE_VEPA     -- ^ talk to other ports through ext bridge
   | MACVLAN_MODE_BRIDGE   -- ^ talk to bridge ports directly
   | MACVLAN_MODE_PASSTHRU -- ^ take over the underlying device
   | MACVLAN_MODE_SOURCE   -- ^ use source MAC address list to assign
   deriving (Show,Eq,Enum)
-- | Set of 'MACVLAN_Mode' packed into a 'Word32' bit set.
type MACVLAN_Modes = BitSet Word32 MACVLAN_Mode
-- | Operations on the source MAC address list.
data MacAddrMode
   = MACADDR_ADD
   | MACADDR_DEL
   | MACADDR_FLUSH
   | MACADDR_SET
   deriving (Show,Eq,Enum)
-- #define MACVLAN_FLAG_NOPROMISC 1
----------------
-- VRF section
----------------
-- | VRF link attribute types.
data VRF
   = VRF_UNSPEC
   | VRF_TABLE
   deriving (Show,Eq,Enum)
-- | VRF port attribute types.
data VRF_Port
   = VRF_PORT_UNSPEC
   | VRF_PORT_TABLE
   deriving (Show,Eq,Enum)
------------------
-- MACSEC section
------------------
-- | MACsec link attribute types.
data MACSEC
   = MACSEC_UNSPEC
   | MACSEC_SCI
   | MACSEC_PORT
   | MACSEC_ICV_LEN
   | MACSEC_CIPHER_SUITE
   | MACSEC_WINDOW
   | MACSEC_ENCODING_SA
   | MACSEC_ENCRYPT
   | MACSEC_PROTECT
   | MACSEC_INC_SCI
   | MACSEC_ES
   | MACSEC_SCB
   | MACSEC_REPLAY_PROTECT
   | MACSEC_VALIDATION
   | MACSEC_PAD
   deriving (Show,Eq,Enum)
-- | MACsec validation modes (values of 'MACSEC_VALIDATION').
data MACSEC_ValidationType
   = MACSEC_VALIDATE_DISABLED
   | MACSEC_VALIDATE_CHECK
   | MACSEC_VALIDATE_STRICT
   | MACSEC_VALIDATE_END
   deriving (Show,Eq,Enum)
-------------------
-- IPVLAN section
-------------------
-- | IPVLAN link attribute types.
data IPVLAN
   = IPVLAN_UNSPEC
   | IPVLAN_MODE
   deriving (Show,Eq,Enum)
-- | IPVLAN operating modes (L2 / L3 / L3S).
data IPVLAN_Mode
   = IPVLAN_MODE_L2
   | IPVLAN_MODE_L3
   | IPVLAN_MODE_L3S
   deriving (Show,Eq,Enum)
-------------------
-- VXLAN section
-------------------
-- | VXLAN link attribute types.
data VXLAN
   = VXLAN_UNSPEC
   | VXLAN_ID
   | VXLAN_GROUP      -- ^ group or remote address
   | VXLAN_LINK
   | VXLAN_LOCAL
   | VXLAN_TTL
   | VXLAN_TOS
   | VXLAN_LEARNING
   | VXLAN_AGEING
   | VXLAN_LIMIT
   | VXLAN_PORT_RANGE -- ^ source port
   | VXLAN_PROXY
   | VXLAN_RSC
   | VXLAN_L2MISS
   | VXLAN_L3MISS
   | VXLAN_PORT       -- ^ destination port
   | VXLAN_GROUP6
   | VXLAN_LOCAL6
   | VXLAN_UDP_CSUM
   | VXLAN_UDP_ZERO_CSUM6_TX
   | VXLAN_UDP_ZERO_CSUM6_RX
   | VXLAN_REMCSUM_TX
   | VXLAN_REMCSUM_RX
   | VXLAN_GBP
   | VXLAN_REMCSUM_NOPARTIAL
   | VXLAN_COLLECT_METADATA
   | VXLAN_LABEL
   | VXLAN_GPE
   deriving (Show,Eq,Enum)
-- | Source-port range (low/high), both ends stored big-endian
-- (payload of 'VXLAN_PORT_RANGE').
data VXLAN_PortRange = VXLAN_PortRange
   { vxlanPortRangeLow  :: AsBigEndian Word16
   , vxlanPortRangeHigh :: AsBigEndian Word16
   }
   deriving (Generic,Storable,Show)
-------------------
-- GENEVE section
-------------------
-- | GENEVE link attribute types.
data Geneve
   = GENEVE_UNSPEC
   | GENEVE_ID
   | GENEVE_REMOTE
   | GENEVE_TTL
   | GENEVE_TOS
   | GENEVE_PORT -- ^ destination port
   | GENEVE_COLLECT_METADATA
   | GENEVE_REMOTE6
   | GENEVE_UDP_CSUM
   | GENEVE_UDP_ZERO_CSUM6_TX
   | GENEVE_UDP_ZERO_CSUM6_RX
   | GENEVE_LABEL
   deriving (Show,Eq,Enum)
----------------
-- PPP section
----------------
-- | PPP link attribute types.
data PPP
   = PPP_UNSPEC
   | PPP_DEV_FD
   deriving (Show,Eq,Enum)
----------------
-- GTP section
----------------
-- | GTP device roles (GGSN / SGSN).
data GTP_Role
   = GTP_ROLE_GGSN
   | GTP_ROLE_SGSN
   deriving (Show,Eq,Enum)
-- | GTP link attribute types.
data GTP
   = GTP_UNSPEC
   | GTP_FD0
   | GTP_FD1
   | GTP_PDP_HASHSIZE
   | GTP_ROLE
   deriving (Show,Eq,Enum)
--------------------
-- Bonding section
--------------------
-- | Bonding device attribute types.
data Bond
   = BOND_UNSPEC
   | BOND_MODE
   | BOND_ACTIVE_SLAVE
   | BOND_MIIMON
   | BOND_UPDELAY
   | BOND_DOWNDELAY
   | BOND_USE_CARRIER
   | BOND_ARP_INTERVAL
   | BOND_ARP_IP_TARGET
   | BOND_ARP_VALIDATE
   | BOND_ARP_ALL_TARGETS
   | BOND_PRIMARY
   | BOND_PRIMARY_RESELECT
   | BOND_FAIL_OVER_MAC
   | BOND_XMIT_HASH_POLICY
   | BOND_RESEND_IGMP
   | BOND_NUM_PEER_NOTIF
   | BOND_ALL_SLAVES_ACTIVE
   | BOND_MIN_LINKS
   | BOND_LP_INTERVAL
   | BOND_PACKETS_PER_SLAVE
   | BOND_AD_LACP_RATE
   | BOND_AD_SELECT
   | BOND_AD_INFO
   | BOND_AD_ACTOR_SYS_PRIO
   | BOND_AD_USER_PORT_KEY
   | BOND_AD_ACTOR_SYSTEM
   | BOND_TLB_DYNAMIC_LB
   deriving (Show,Eq,Enum)
-- | 802.3ad (LACP) aggregation info attributes (nested under 'BOND_AD_INFO').
data BondAdInfo
   = BOND_AD_INFO_UNSPEC
   | BOND_AD_INFO_AGGREGATOR
   | BOND_AD_INFO_NUM_PORTS
   | BOND_AD_INFO_ACTOR_KEY
   | BOND_AD_INFO_PARTNER_KEY
   | BOND_AD_INFO_PARTNER_MAC
   deriving (Show,Eq,Enum)
-- | Bonding slave attribute types.
data BondSlave
   = BOND_SLAVE_UNSPEC
   | BOND_SLAVE_STATE
   | BOND_SLAVE_MII_STATUS
   | BOND_SLAVE_LINK_FAILURE_COUNT
   | BOND_SLAVE_PERM_HWADDR
   | BOND_SLAVE_QUEUE_ID
   | BOND_SLAVE_AD_AGGREGATOR_ID
   | BOND_SLAVE_AD_ACTOR_OPER_PORT_STATE
   | BOND_SLAVE_AD_PARTNER_OPER_PORT_STATE
   deriving (Show,Eq,Enum)
----------------------------------------------
-- SR-IOV virtual function management section
----------------------------------------------
-- | SR-IOV virtual-function info list attribute types.
data VF_Info
   = VF_INFO_UNSPEC
   | VF_INFO
   deriving (Show,Eq,Enum)
-- | Attribute types nested inside a VF info entry.
data VF_TYPE
   = VF_TYPE_UNSPEC
   | VF_TYPE_MAC            -- ^ Hardware queue specific attributes
   | VF_TYPE_VLAN           -- ^ VLAN ID and QoS
   | VF_TYPE_TX_RATE        -- ^ Max TX Bandwidth Allocation
   | VF_TYPE_SPOOFCHK       -- ^ Spoof Checking on/off switch
   | VF_TYPE_LINK_STATE     -- ^ link state enable/disable/auto switch
   | VF_TYPE_RATE           -- ^ Min and Max TX Bandwidth Allocation
   | VF_TYPE_RSS_QUERY_EN   -- ^ RSS Redirection Table and Hash Key query
                            -- on/off switch
   | VF_TYPE_STATS          -- ^ network device statistics
   | VF_TYPE_TRUST          -- ^ Trust VF
   | VF_TYPE_IB_NODE_GUID   -- ^ VF Infiniband node GUID
   | VF_TYPE_IB_PORT_GUID   -- ^ VF Infiniband port GUID
   | VF_TYPE_VLAN_LIST      -- ^ nested list of vlans, option for QinQ
   deriving (Show,Eq,Enum)
-- | VF index paired with its hardware address (32-byte MAX_ADDR_LEN buffer).
data VF_MAC = VF_MAC
   { vfMacVF  :: !Word32
   , vfMacMac :: Vector 32 Word8 -- MAX_ADDR_LEN
   }
   deriving (Generic,Storable,Show)
-- | Per-VF VLAN ID and QoS values.
data VF_VLAN = VF_VLAN
   { vfVlanVF   :: !Word32
   , vfVlanVLAN :: !Word32 -- ^ 0 - 4095, 0 disables VLAN filter
   , vfVlanQOS  :: !Word32
   }
   deriving (Generic,Storable,Show)
-- | VF VLAN list attribute types.
data VF_VLAN_INFO
   = VF_VLAN_INFO_UNSPEC
   | VF_VLAN_INFO -- ^ VLAN ID, QoS and VLAN protocol
   deriving (Show,Eq,Enum)
-- | Payload of 'VF_VLAN_INFO': VLAN ID, QoS and VLAN protocol.
data VFVlanInfo = VFVlanInfo
   { vfVlanInfoVF   :: !Word32
   , vfVlanInfoVLAN :: !Word32 -- ^ 0 - 4095, 0 disables VLAN filter
   , vfVlanInfoQOS  :: !Word32
   , vfVlanProto    :: AsBigEndian Word16 -- ^ VLAN protocol either 802.1Q or 802.1ad
   }
   deriving (Generic,Storable,Show)
-- | Per-VF maximum TX rate.
data VF_TX_RATE = VF_TX_RATE
   { vfTxRateVF   :: !Word32
   , vfTxRateRate :: !Word32 -- ^ Max TX bandwidth in Mbps, 0 disables throttling
   }
   deriving (Generic,Storable,Show)
-- | Per-VF min/max TX bandwidth allocation.
data VF_RATE = VF_RATE
   { vfRateVF        :: !Word32
   , vfRateMinTxRate :: !Word32 -- ^ Min Bandwidth in Mbps
   , vfRateMaxTxRate :: !Word32 -- ^ Max Bandwidth in Mbps
   }
   deriving (Generic,Storable,Show)
-- | Per-VF spoof-checking setting.
data VF_SpoofCheck = VF_SpoofCheck
   { vfSpoofCheckVF      :: !Word32
   , vfSpoofCheckSetting :: !Word32
   }
   deriving (Generic,Storable,Show)
-- | Per-VF Infiniband GUID.
data VF_GUID = VF_GUID
   { vfGuidVF   :: !Word32
   , vfGuidGUID :: !Word64
   }
   deriving (Generic,Storable,Show)
-- | VF link-state values.
data VF_LINK_STATE
   = VF_LINK_STATE_AUTO    -- ^ link state of the uplink
   | VF_LINK_STATE_ENABLE  -- ^ link always up
   | VF_LINK_STATE_DISABLE -- ^ link always down
   deriving (Show,Eq,Enum)
-- | Per-VF link state (values from 'VF_LINK_STATE').
data VFLinkState = VFLinkState
   { vfLinkStateVF    :: !Word32
   , vfLinkStateState :: !Word32
   }
   deriving (Generic,Storable,Show)
-- | Per-VF RSS query enable setting.
data VF_RSS_QUERY_EN = VF_RSS_QUERY_EN
   { vfRssQueryVF      :: !Word32
   , vfRssQuerySetting :: !Word32
   }
   deriving (Generic,Storable,Show)
-- | VF statistics attribute types.
data VF_STATS
   = VF_STATS_RX_PACKETS
   | VF_STATS_TX_PACKETS
   | VF_STATS_RX_BYTES
   | VF_STATS_TX_BYTES
   | VF_STATS_BROADCAST
   | VF_STATS_MULTICAST
   | VF_STATS_PAD
   deriving (Show,Eq,Enum)
-- | Per-VF trust setting.
data VF_Trust = VF_Trust
   { vfTrustVF      :: !Word32
   , vfTrustSetting :: !Word32
   }
   deriving (Generic,Storable,Show)
deriving (Generic,Storable,Show)
-- VF ports management section
--
--	Nested layout of set/get msg is:
--
--		[IFLA_NUM_VF]
--		[IFLA_VF_PORTS]
--			[IFLA_VF_PORT]
--				[IFLA_PORT_*], ...
--			[IFLA_VF_PORT]
--				[IFLA_PORT_*], ...
--			...
--		[IFLA_PORT_SELF]
--			[IFLA_PORT_*], ...
--
-- | IFLA_VF_PORT_* nesting attribute.
data VF_PORT
   = VF_PORT_UNSPEC
   | VF_PORT            -- ^ nest
   deriving (Show,Eq,Enum)
-- | IFLA_PORT_* attributes (802.1Qbg VSI/VDP port profile management).
data PORT
   = PORT_UNSPEC
   | PORT_VF            -- __u32
   | PORT_PROFILE       -- string
   | PORT_VSI_TYPE      -- 802.1Qbg (pre-)standard VDP
   | PORT_INSTANCE_UUID -- binary UUID
   | PORT_HOST_UUID     -- binary UUID
   | PORT_REQUEST       -- __u8
   | PORT_RESPONSE      -- __u16, output only
   deriving (Show,Eq,Enum)
-- #define PORT_PROFILE_MAX	40
-- #define PORT_UUID_MAX		16
-- #define PORT_SELF_VF		-1
-- | PORT_REQUEST_* values sent in an IFLA_PORT_REQUEST attribute.
data Request
   = REQUEST_PREASSOCIATE
   | REQUEST_PREASSOCIATE_RR
   | REQUEST_ASSOCIATE
   | REQUEST_DISASSOCIATE
   deriving (Show,Eq,Enum)
-- | PORT_*_RESPONSE_* values reported in IFLA_PORT_RESPONSE.
--
-- NOTE: deliberately no 'Enum' derivation — PORT_PROFILE_RESPONSE_SUCCESS
-- is 0x100 in the kernel header (0x08-0xFF are reserved), so positional
-- Enum numbering would assign the wrong values.
-- NOTE(review): "VIOALTION" typo is kept as-is; renaming the constructor
-- would break every user of this type.
data PORT_VDP
   = PORT_VDP_RESPONSE_SUCCESS
   | PORT_VDP_RESPONSE_INVALID_FORMAT
   | PORT_VDP_RESPONSE_INSUFFICIENT_RESOURCES
   | PORT_VDP_RESPONSE_UNUSED_VTID
   | PORT_VDP_RESPONSE_VTID_VIOLATION
   | PORT_VDP_RESPONSE_VTID_VERSION_VIOALTION
   | PORT_VDP_RESPONSE_OUT_OF_SYNC
   -- 0x08-0xFF reserved for future VDP use (TODO)
   | PORT_PROFILE_RESPONSE_SUCCESS -- = 0x100
   | PORT_PROFILE_RESPONSE_INPROGRESS
   | PORT_PROFILE_RESPONSE_INVALID
   | PORT_PROFILE_RESPONSE_BADSTATE
   | PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES
   | PORT_PROFILE_RESPONSE_ERROR
   deriving (Show,Eq)
-- | IFLA_PORT_VSI_TYPE payload (ifla_port_vsi struct).
data PORT_VSI = PORT_VSI
   { vsiManagerId   :: !Word8
   , vsiTypeId      :: Vector 3 Word8
   , vsiTypeVersion :: !Word8
   , vsiPadding     :: Vector 3 Word8
   }
   deriving (Generic,Storable,Show)
-----------------
-- IPoIB section
-----------------
-- | IFLA_IPOIB_* attributes for IP-over-Infiniband links.
data IPOIB
   = IPOIB_UNSPEC
   | IPOIB_PKEY
   | IPOIB_MODE
   | IPOIB_UMCAST
   deriving (Show,Eq,Enum)
-- | IPOIB_MODE_* values.
data IPOIB_MODE
   = IPOIB_MODE_DATAGRAM  -- ^ using unreliable datagram QPs
   | IPOIB_MODE_CONNECTED -- ^ using connected QPs
   deriving (Show,Eq,Enum)
----------------
-- HSR section
----------------
-- | IFLA_HSR_* attributes (High-availability Seamless Redundancy).
data HSR
   = HSR_UNSPEC
   | HSR_SLAVE1
   | HSR_SLAVE2
   | HSR_MULTICAST_SPEC   -- ^ Last byte of supervision addr
   | HSR_SUPERVISION_ADDR -- ^ Supervision frame multicast addr
   | HSR_SEQ_NR
   | HSR_VERSION          -- ^ HSR version
   deriving (Show,Eq,Enum)
-----------------
-- STATS section
-----------------
-- | Header of an RTM_GETSTATS request/reply (struct if_stats_msg).
data InterfaceStatsMsg = InterfaceStatsMsg
   { ifStatsMsgFamily     :: !Word8
   , ifStatsMsgPad1       :: !Word8
   , ifStatsMsgPad2       :: !Word16
   , ifStatsMsgIfIndex    :: !Word32
   , ifStatsMsgFilterMask :: !Word32
   }
   deriving (Generic,Storable,Show)
-- A stats attribute can be netdev specific or a global stat.
-- For netdev stats, lets use the prefix IFLA_STATS_LINK_*
data STATS
   = STATS_UNSPEC -- ^ also used as 64bit pad attribute
   | STATS_LINK_64
   | STATS_LINK_XSTATS
   | STATS_LINK_XSTATS_SLAVE
   | STATS_LINK_OFFLOAD_XSTATS
   | STATS_AF_SPEC
   deriving (Show,Eq,Enum)
-- #define IFLA_STATS_FILTER_BIT(ATTR)	(1 << (ATTR - 1))
-- These are embedded into IFLA_STATS_LINK_XSTATS:
-- [IFLA_STATS_LINK_XSTATS]
-- -> [LINK_XSTATS_TYPE_xxx]
--    -> [rtnl link type specific attributes]
data LINK_XSTATS_TYPE
   = LINK_XSTATS_TYPE_UNSPEC
   | LINK_XSTATS_TYPE_BRIDGE
   deriving (Show,Eq,Enum)
-- These are stats embedded into IFLA_STATS_LINK_OFFLOAD_XSTATS
data OFFLOAD_XSTATS
   = OFFLOAD_XSTATS_UNSPEC
   | OFFLOAD_XSTATS_CPU_HIT -- ^ struct rtnl_link_stats64
   deriving (Show,Eq,Enum)
---------------
-- XDP section
---------------
-- #define XDP_FLAGS_UPDATE_IF_NOEXIST	(1U << 0)
-- #define XDP_FLAGS_SKB_MODE		(1U << 1)
-- #define XDP_FLAGS_DRV_MODE		(1U << 2)
-- #define XDP_FLAGS_MASK		(XDP_FLAGS_UPDATE_IF_NOEXIST | \
--					 XDP_FLAGS_SKB_MODE | \
--					 XDP_FLAGS_DRV_MODE)
-- These are stored into IFLA_XDP_ATTACHED on dump.
data XDP_ATTACHED
   = XDP_ATTACHED_NONE
   | XDP_ATTACHED_DRV
   | XDP_ATTACHED_SKB
   deriving (Show,Eq,Enum)
-- | IFLA_XDP_* attributes for attaching eBPF programs to a device.
data XDP
   = XDP_UNSPEC
   | XDP_FD
   | XDP_ATTACHED
   | XDP_FLAGS
   deriving (Show,Eq,Enum)
| hsyl20/ViperVM | haskus-system/src/lib/Haskus/System/Linux/Internals/IfLink.hs | bsd-3-clause | 23,467 | 0 | 9 | 5,076 | 3,463 | 2,149 | 1,314 | 783 | 0 |
module Language.Pads.Parser where
{-
** *********************************************************************
* *
* (c) Kathleen Fisher <[email protected]> *
* John Launchbury <[email protected]> *
* *
************************************************************************
-}
-- This is the parser for the PADS syntax in Haskell
import Text.Parsec hiding (upper,lower)
import qualified Text.Parsec.String as PS
import qualified Text.Parsec.Prim as PP
import qualified Text.Parsec.Token as PT
import Text.ParserCombinators.Parsec.Language (haskellStyle, reservedOpNames, reservedNames)
import Text.ParserCombinators.Parsec.Pos (newPos)
import qualified Language.Haskell.Meta as LHM -- Supports parsing Haskell forms
import Language.Pads.Syntax              -- Defines syntax tree for PADS forms
import Language.Haskell.TH -- Defines data structures for Haskell Code
import Data.Char (isUpper, isLower)
import Control.Monad (guard)
-- | PADS parsers work over plain 'String' input.
type Parser = PS.Parser
-- | The environment is the list of type-variable names bound by the
-- declaration currently being parsed.
type Env    = [String]
-- The main entry point for the QuasiQuoter is parsePadsDecls.
-- To find documentation for many Haskell library functions, go to
--    http://www.haskell.org/hoogle/
-- and enter the name of the function.
-- | Parse a sequence of PADS declarations.  The file name, line and
-- column locate the quasiquote in the original source so that Parsec
-- error positions line up with the user's file.
parsePadsDecls :: SourceName -> Line -> Column -> String -> Either ParseError [PadsDecl]
parsePadsDecls fileName line column input
  = PP.parse (do { setPosition (newPos fileName line column)
                 ; whiteSpace
                 ; x <- decls
                 ; whiteSpace
                 ; eof <|> errorParse
                 ; return x
                 }) fileName input
-- This function consumes input until the eof marker.
-- Used to turn trailing garbage into an "unexpected ..." parse error.
errorParse = do
  { rest <- manyTill anyToken eof
  ; unexpected rest }
-------------------------
-- PADS DECLARATIONS
-------------------------
-- | Zero or more PADS declarations.
decls :: Parser [PadsDecl]
decls = many decl
-- | A single declaration, dispatched on its leading keyword.
decl :: Parser PadsDecl
decl
  =  typeDecl <|> dataDecl <|> newDecl <|> obtainDecl
 <?> "Pads declaration keyword"
-- | @type Name a b [pat] = rhs@
typeDecl :: Parser PadsDecl
typeDecl
  = do { reserved "type"
       ; (id,env) <- padsID; pat <- patLHS
       ; rhs <- ptype env
       ; return (PadsDeclType id env pat rhs)
       } <?> "Pads type declaration"
-- | @data Name a b [pat] = rhs [deriving ...]@
dataDecl :: Parser PadsDecl
dataDecl
  = do { reserved "data"
       ; (id,env) <- padsID; pat <- patLHS
       ; rhs <- dataRHS env; drvs <- option [] derives
       ; return (PadsDeclData id env pat rhs drvs)
       } <?> "Pads data declaration"
-- | @newtype Name a b [pat] = rhs [deriving ...]@
newDecl :: Parser PadsDecl
newDecl
  = do { reserved "newtype"
       ; (id,env) <- padsID; pat <- patLHS
       ; rhs <- newRHS env; drvs <- option [] derives
       ; return (PadsDeclNew id env pat rhs drvs)
       } <?> "Pads newtype declaration"
-- | @obtain Name a b from srcTy using expr@
obtainDecl :: Parser PadsDecl
obtainDecl
  = do { reserved "obtain"
       ; (id,env) <- padsID
       ; reservedOp "from"; rhs <- ptype env
       ; reserved "using"; exp <- expression
       ; return (PadsDeclObtain id env rhs exp)
       } <?> "Pads transform type"
-- | A declaration head: an upper-case name followed by its type
-- variables (lower-case identifiers), which become the 'Env'.
padsID
  = do { id <- upper; env <- try $ many lower
       ; return (id,env)
       }
-- | Optional Haskell pattern before the @=@ sign; 'Nothing' when the
-- declaration has no pattern argument.
patLHS
  = do { p <- try $ haskellParsePatTill "="
       ; return (Just p)
       }
    <|> (reservedOp "=" >> return Nothing)
-- | @deriving C@ or @deriving (C1, C2, ...)@.
derives
  = reserved "deriving" >>
    (do { q <- qualUpper; return [q] }
     <|> parens (commaSep1 qualUpper))
-------------------------
-- PADS TYPES
-------------------------
-- | Parse a PADS type expression.  The keyword-introduced forms
-- (constrain/obtain/partition/list/value) are tried before falling back
-- to an applied base type ('btype').
ptype :: Env -> Parser PadsTy
ptype env
  =  constrain env
 <|> obtain env
 <|> partition env
 <|> listTy env
 <|> value env
 <|> btype env
 <?> "Pads type expression"   -- was "Pads Pads type expression": duplicated word
-- | @constrain pat :: ty where expr@ — a type refined by a predicate.
constrain :: Env -> Parser PadsTy
constrain env
  = do { reserved "constrain"
       ; pat <- haskellParsePatTill "::"
       ; ty <- ptype env
       ; exp <- predic
       ; return (PConstrain pat ty exp)
       } <?> "Pads constrain type"
-- | The @where expr@ clause shared by constrained types and fields.
predic = do { reservedOp "where"; expression }
-- | @obtain dst from src using expr@ — parse as @src@, convert to @dst@.
obtain :: Env -> Parser PadsTy
obtain env
  = do { reserved "obtain"; dst <- ptype env
       ; reservedOp "from"; src <- ptype env
       ; reserved "using"; exp <- expression
       ; return (PTransform src dst exp)
       } <?> "Pads transform type"
-- | @partition ty using expr@ — split the input with a record discipline.
partition :: Env -> Parser PadsTy
partition env
  = do { reserved "partition"; ty <- ptype env
       ; reserved "using"; exp <- expression
       ; return (PPartition ty exp)
       } <?> "Pads partition type"
-- | @[ elem | sep ] terminator t@ / @[ elem | sep ] length e@ — list types.
listTy :: Env -> Parser PadsTy
listTy env
  = do { (elm,sepM) <- brackets (listInside env)
       ; termM <- listEnd env
       ; return (PList elm sepM termM)
       } <?> "Pads list type"
-- | The element type and optional separator inside list brackets.
listInside env
  = do { elm <- ptype env
       ; sepM <- optionMaybe (reservedOp "|" >> ptype env)
       ; return (elm,sepM)
       }
-- | Optional list termination: a terminator type or an explicit length.
listEnd env
  = optionMaybe
    ( do {reservedOp "terminator"; t<-ptype env; return (LTerm t)}
      <|> do {reservedOp "length"; e<-expression; return (LLen e)})
-- | @value expr :: ty@ — a computed (non-parsed) value.
value env
  = do { reserved "value"
       ; exp <- expression; reservedOp "::"
       ; ty <- ptype env
       ; return (PValue exp ty)
       }
-- | Parse a (possibly applied) base type: a head type followed by zero
-- or more argument types and an optional trailing expression argument.
-- A bare head with no arguments is returned unchanged; anything applied
-- becomes a 'PApp'.
btype :: Env -> Parser PadsTy
btype env
  = try $ do
    { ty <- etype env; tys <- many (atype env)
    ; expM <- optionMaybe (try expression)     -- removed stray duplicate ';'
    ; if null tys && expM == Nothing           -- 'null' instead of O(n) 'length tys==0'
         then return ty
         else return (PApp (ty:tys) expM) }
-- | An "element" type: either an atomic type or a literal expression
-- used as a type-level argument.
etype :: Env -> Parser PadsTy
etype env = atype env
        <|> try (expression >>= (return . PExpression))
-- | Atomic types: tuples, bracketed lists (no terminator), type
-- constructors, and in-scope type variables.
atype env
  = try (tuple env)
    <|> do { (elm,sepM) <- brackets (listInside env)
           ; return (PList elm sepM Nothing)}
    <|> fmap PTycon qualUpper
    <|> fmap PTyvar (tyvar env)
-- | Parse a parenthesised, comma-separated sequence of types.
-- @()@ denotes the unit type \"Void\", @(t)@ is just @t@ in
-- parentheses, and two or more components form a 'PTuple'.
tuple :: Env -> Parser PadsTy
tuple env
  = do { tys <- parens $ option [] (commaSep1 (ptype env))
       ; case tys of                       -- match directly: avoids partial
           []   -> return (PTycon ["Void"]) -- 'head' and O(n) 'length'
           [ty] -> return ty
           _    -> return (PTuple tys)
       }
    <?> "Pads tuple type"
------------------------------
-- PADS DATA DECLARATIONS
------------------------------
-- | Right-hand side of a @data@ declaration: either a case/switch or a
-- plain union of constructors.
dataRHS :: Env -> Parser PadsData
dataRHS env
  =  switchTy env
 <|> fmap PUnion (constrs env)
 <?> "Pads data type right hand side"
-- | @case expr of pat -> constr | ...@
switchTy :: Env -> Parser PadsData
switchTy env
  = do { reservedOp "case"; exp <- expression
       ; reservedOp "of"; brs <- branch env `sepBy1` reservedOp "|"
       ; return (PSwitch exp brs)
       } <?> "Pads switch type"
-- | One branch of a switch: a Haskell pattern and a constructor.
branch :: Env -> Parser (Pat, BranchInfo)
branch env
  = do { pat <- haskellParsePatTill "->"; br <- constr env
       ; return (pat, br)
       } <?> "Pads switch branch"
-- | @constr | constr | ...@
constrs :: Env -> Parser [BranchInfo]
constrs env = constr env `sepBy1` reservedOp "|"
-- | A single constructor: either a record or a positional constructor.
-- A bare nullary constructor @Tue@ expands to parsing the literal
-- string \"Tue\" (see 'mkId').
constr :: Env -> Parser BranchInfo
constr env
  = do { id  <- upper;
       ; do { args <- record env; predM <- optionMaybe predic
            ; return (BRecord id args predM)}
         <|> do { args <- option (mkId id) (constrArgs env)
                ; predM <- optionMaybe predic
                ; return (BConstr id args predM)}}
  where
    mkId id = [(NotStrict, PExpression (LitE (StringL id)))]
    -- Provides the expansion e.g.: Tue -> Tue "Tue"
-- | One or more constructor arguments, each optionally strict (@!@).
constrArgs :: Env -> Parser [ConstrArg]
constrArgs env
  = many1 $ do
    { bang <- option NotStrict (reservedOp "!" >> return IsStrict)
    ; ty <- etype env
    ; return (bang,ty)
    }
-- | @{ field, field, ... }@
record :: Env -> Parser [FieldInfo]
record env
  = do { reservedOp "{"
       ; flds <- field env `sepBy` reservedOp ","
       ; reservedOp "}"
       ; return flds
       } <?> "Pads record type"
-- | A record field: named (@x :: ty@), a named computed value
-- (@x = value expr :: ty@), or anonymous (just a type).
field :: Env -> Parser FieldInfo
field env
  = try (do { id <- (lower << reservedOp "::")
            ; ty <- ftype env
            ; predM <- optionMaybe predic
            ; return (Just id, ty, predM)
            })
    <|> try (do { id <- lower; reservedOp "="
                ; reserved "value"
                ; exp <- expression; reservedOp "::"
                ; (strict,ty) <- ftype env
                ; predM <- optionMaybe predic
                ; return (Just id, (strict, PValue exp ty), predM)
                })
    <|> do { ty <- ftype env
           ; predM <- optionMaybe predic
           ; return (Nothing, ty, predM)
           }
    <?> "record field"
-- | A field type with an optional leading strictness annotation (@!@).
ftype env
  =  do { reservedOp "!"; ty <- atype env; return (IsStrict,ty)}
 <|> do { ty <- ptype env; return (NotStrict,ty)}
-------------------------------
-- PADS NEW TYPE DECLARATIONS
-------------------------------
-- | Right-hand side of a @newtype@: one constructor, carrying either a
-- record with exactly one named field or a single positional argument.
newRHS :: Env -> Parser BranchInfo
newRHS env
  = do { id <- upper;
       ; do { rec <- record1 env
            ; predM <- optionMaybe predic
            ; return (BRecord id rec predM)}
         <|> do { arg <- atype env
                ; predM <- optionMaybe predic
                ; return (BConstr id [(NotStrict,arg)] predM)
                }
       }
-- | A newtype record: any number of anonymous field types around
-- exactly one named field ('field1').
record1 :: Env -> Parser [FieldInfo]
record1 env
  = do { reservedOp "{"
       ; args1 <- many (ftype env << reservedOp ",")
       ; fld <- field1 env
       ; args2 <- many (reservedOp "," >> ftype env)
       ; reservedOp "}"
       ; return (map expand args1 ++ [fld] ++ map expand args2)
       } <?> "Pads newtype record"
  where
    expand fty = (Nothing, fty, Nothing)
-- | The single named field allowed in a newtype record.
field1 :: Env -> Parser FieldInfo
field1 env
  = do { id <- lower; reservedOp "::"; ty <- ptype env
       ; predM <- optionMaybe predic
       ; return (Just id, (NotStrict,ty), predM)
       }
-----------------------------------
-- HASKELL IN PADS DECLARATIONS
-----------------------------------
-- | An embedded Haskell expression: either a @<| ... |>@ antiquote or a
-- simple literal.
expression :: Parser Exp
expression =  haskellExp
          <|> literal
-- | A Haskell expression bracketed by @<|@ and @|>@.
haskellExp :: Parser (Exp)
haskellExp = do { reservedOp "<|"
                ; haskellParseExpTill "|>"
                }
             <?> "Pads Haskell expression"
-- | Lift a string into a Template Haskell expression via
-- haskell-src-meta.  A parse failure maps to 'parserZero' so the
-- surrounding Parsec alternatives can try something else.
haskellParseExp :: String -> Parser Exp
haskellParseExp str = case LHM.parseExp str of
                        Left _err   -> parserZero  -- '_err': silence unused-binding warning;
                                                   -- the message is deliberately discarded
                        Right expTH -> return expTH
-- | Collect raw characters up to (and consuming) the operator @op@,
-- then parse them as a Haskell expression.
haskellParseExpTill :: String -> Parser Exp
haskellParseExpTill op = do { str <- manyTill anyChar (reservedOp op)
                            ; haskellParseExp str
                            }
-- | As 'haskellParseExp', but for Template Haskell patterns.
haskellParsePat :: String -> Parser Pat
haskellParsePat str = case LHM.parsePat str of
                        Left _err   -> parserZero  -- message discarded, as above
                        Right patTH -> return patTH
-- | Collect raw characters up to (and consuming) @op@, then parse them
-- as a Haskell pattern.
haskellParsePatTill :: String -> Parser Pat
haskellParsePatTill op = do { str <- manyTill anyChar (reservedOp op)
                            ; haskellParsePat str
                            }
-- | Literals usable directly in PADS declarations: characters, regular
-- expressions, strings, integers, and (qualified) variable/constructor
-- names.
literal :: Parser Exp
literal =  fmap (LitE . CharL) (try charLiteral)
       <|> reLiteral
       <|> fmap (LitE . StringL) stringLiteral
       <|> fmap (LitE . IntegerL) (try integer)
       <|> fmap (VarE . mkName . qName) qualLower
       <|> fmap (ConE . mkName . qName) qualUpper
       <?> "Pads literal"
-- | A regular-expression literal delimited by 'reMark' quotes; builds
-- an application of the @RE@ constructor to the raw string.
reLiteral :: Parser Exp
reLiteral = do { reservedOp reMark
               ; str <- manyTill anyChar (reservedOp reMark)
               ; return (ConE (mkName "RE") `AppE` LitE (StringL str))
               }
-- | Delimiter for regular-expression literals.
reMark = "'"
-- | Qualified names: dot-separated upper-case components, optionally
-- ending in a lower-case identifier for 'qualLower'.
qualUpper, qualLower :: Parser QString
qualUpper = try (upper `sepBy1` reservedOp ".")
qualLower = try $ do { prefix <- many (upper << reservedOp ".")
                     ; final <- lower
                     ; return (prefix ++ [final])
                     }
-- | An identifier whose first character is upper-case.  Written as a
-- total pattern match: the lexer never yields an empty identifier, but
-- this avoids the partial 'head' of the previous version.
upper :: Parser String
upper = try $ do { id <- identifier
                 ; case id of
                     (c:_) | isUpper c -> return id
                     _                 -> parserZero }
-- | An identifier whose first character is lower-case.
lower :: Parser String
lower = try $ do { id <- identifier
                 ; case id of
                     (c:_) | isLower c -> return id
                     _                 -> parserZero }
-- | A type variable: a lower-case identifier that is bound in the
-- current environment of declaration parameters.
tyvar env = try $ do { v <- lower
                     ; guard (v `elem` env)
                     ; return v }
---------------
-- | Sequence two actions and keep only the first result — the mirror
-- image of '>>'.  (Monad-polymorphic, not restricted to 'Parser'.)
p << q = p >>= \keep -> q >> return keep
-- | Token parser configured from 'haskellStyle' with the PADS keywords
-- and reserved operators.  All the wrappers below (whiteSpace,
-- identifier, ...) share this single lexer so comments and whitespace
-- are handled uniformly.
lexer :: PT.TokenParser ()
lexer = PT.makeTokenParser (haskellStyle
          { reservedOpNames = ["=", "=>", "{", "}", "::", "<|", "|>", "|", reMark, "." ],
            reservedNames   = ["data", "type", "newtype", "old", "existing", "deriving",
                               "using", "where", "terminator", "length", "of", "from",
                               "case", "constrain", "obtain", "partition","value" ]})
whiteSpace    = PT.whiteSpace  lexer
identifier    = PT.identifier  lexer
operator      = PT.operator    lexer
reserved      = PT.reserved    lexer
reservedOp    = PT.reservedOp  lexer
charLiteral   = PT.charLiteral lexer
stringLiteral = PT.stringLiteral  lexer
integer       = PT.integer     lexer
commaSep1     = PT.commaSep1   lexer
parens        = PT.parens      lexer
braces        = PT.braces      lexer
brackets      = PT.brackets    lexer
| GaloisInc/pads-haskell | Language/Pads/Parser.hs | bsd-3-clause | 12,460 | 4 | 15 | 3,754 | 3,951 | 2,016 | 1,935 | 299 | 3 |
-- | Provides the main entry point and supports the following commands:
--
-- * @clone@ - Clone a remote repository using the native git protocol.
-- Similar to the @git clone@ command.
--
-- * @ls-remote@ - List references in a remote repository.
--
-- * @unpack@ - Unpack a pack file into a bare repository.
--
-- * @read-index@ - Read a @.git/index@ file and show the index entries.
module Main (main, run) where
import System.Environment (getArgs)
import Data.Maybe (listToMaybe)
import Git.Store.Index (IndexEntry(..), readIndex)
import Git.Remote.Operations
import Git.Store.Unpack
-- | Entry point: dispatch on the first command-line argument; print a
-- usage summary (via 'error') when no command is given.
main :: IO ()
main = do
    args <- getArgs
    case args of
        (cmd:xs) -> run cmd xs
        _        -> error $ "usage: hgit <command> [<args>]\n\n" ++
                    "Supported commands are:\n" ++
                    "clone <repo> [<dir>]      Clone a repository into a new directory\n" ++
                    "ls-remote <repo>          List references in a remote repository\n" ++
                    "unpack <file>             Unpack a pack file into a bare repository.\n" ++
                    "read-index <file>         Read a `.git/index` file and show the index entries."
-- | Execute the given command with its remaining arguments.
-- @read-index@ takes an optional path pattern: with a pattern the
-- entries are filtered, otherwise every entry is printed.
run :: String -> [String] -> IO ()
run cmd cmdArgs = case (cmd, cmdArgs) of
    ("clone",      url : rest)      -> clone url (listToMaybe rest)
    ("ls-remote",  url : _)         -> lsRemote url
    ("unpack",     name : file : _) -> unpack name file
    ("read-index", file : pat : _)  -> do
        entries <- readIndex file
        printIndex (filter (\entry -> path entry == pat) entries)
    ("read-index", file : _)        -> printIndex =<< readIndex file
    _                               -> error "Unknown command or missing arguments"
-- | Print each index entry using its 'Show' instance, with a blank line
-- after every entry.
printIndex :: [IndexEntry] -> IO ()
printIndex entries = mapM_ (\entry -> putStrLn (show entry ++ "\n")) entries
| fcharlie/hgit | src/Main.hs | bsd-3-clause | 1,970 | 0 | 15 | 686 | 379 | 204 | 175 | 28 | 2 |
{-# LANGUAGE CPP #-}
{-# LANGUAGE DeriveDataTypeable, TemplateHaskell #-}
module Data.IntTrie (
IntTrie(..),
construct,
lookup,
TrieLookup(..),
#ifdef TESTS
tests,
prop,
#endif
) where
import Prelude hiding (lookup)
import Data.Typeable (Typeable)
import qualified Data.Array.Unboxed as A
import Data.Array.IArray ((!))
import qualified Data.Bits as Bits
import Data.Word (Word32)
import Data.List hiding (lookup)
import Data.Function (on)
import Data.SafeCopy (base, deriveSafeCopy)
import Distribution.Server.Framework.Instances()
import Distribution.Server.Framework.MemSize
-- | A compact mapping from sequences of small ints to small ints.
-- The whole trie is flattened into a single unboxed 'Word32' array;
-- the phantom parameters @k@ and @v@ record the Enum types used at the
-- 'construct' / 'lookup' boundary.
newtype IntTrie k v = IntTrie (A.UArray Word32 Word32)
    deriving (Show, Typeable)
-- Version 0 used 16-bit integers and is no longer supported
-- (To upgrade, DELETE /server-status/tarindices to wipe the tar indices state)
$(deriveSafeCopy 1 'base ''IntTrie)
instance MemSize (IntTrie k v) where
    memSize (IntTrie o) = memSizeUArray 4 o  -- 4 presumably bytes per Word32 element — TODO confirm against memSizeUArray's contract
-- Compact, read-only implementation of a trie. It's intended for use with file
-- paths, but we do that via string ids.
#ifdef TESTS
-- Example mapping:
--
example0 :: [(FilePath, Int)]
example0 =
  [("foo-1.0/foo-1.0.cabal", 512)   -- tar block 1
  ,("foo-1.0/LICENSE",       2048)  -- tar block 4
  ,("foo-1.0/Data/Foo.hs",   4096)] -- tar block 8
-- After converting path components to integers this becomes:
--
example1 :: Paths Word32 Word32
example1 =
  [([1,2],   512)
  ,([1,3],   2048)
  ,([1,4,5], 4096)]
-- As a trie this looks like:
-- [ (1, *) ]
--     |
--    [ (2, 512), (3, 2048), (4, *) ]    -- (fixed comment: value for key 3 is 2048, not 1024)
--                               |
--                              [ (5, 4096) ]
-- We use an intermediate trie representation
example2 :: Trie Word32 Word32
example2 = Trie [ Node 1 t1 ]
  where
    t1 = Trie [ Leaf 2 512, Leaf 3 2048, Node 4 t2 ]
    t2 = Trie [ Leaf 5 4096 ]
example2' :: Trie Word32 Word32
example2' = Trie [ Node 0 t1 ]
  where
    t1 = Trie [ Node 3 t2 ]
    t2 = Trie [ Node 1 t3, Node 2 t4 ]
    t3 = Trie [ Leaf 4 10608 ]
    t4 = Trie [ Leaf 4 10612 ]
{-
0: [1,N0,3]
3: [1,N3,6]
6: [2,N1,N2,11,12]
11: [1,4,10608]
14: [1,4,10612]
-}
example2'' :: Trie Word32 Word32
example2'' = Trie [ Node 1 t1, Node 2 t2 ]
  where
    t1 = Trie [ Leaf 4 10608 ]
    t2 = Trie [ Leaf 4 10612 ]
example2''' :: Trie Word32 Word32
example2''' = Trie [ Node 0 t3 ]
  where
    t3  = Trie [ Node 4 t8, Node 6 t11 ]
    t8  = Trie [ Node 1 t14 ]
    t11 = Trie [ Leaf 5 10605 ]
    t14 = Trie [ Node 2 t19, Node 3 t22 ]
    t19 = Trie [ Leaf 7 10608 ]
    t22 = Trie [ Leaf 7 10612 ]
{-
0: [1,N0,3]
3: [2,N4,N6,8,11]
8: [1,N1,11]
11: [1,5,10605]
14: [2,N2,N3,16,19]
19: [1,7,10608]
22: [1,7,10612]
-}
-- We convert from the 'Paths' to the 'Trie' using 'mkTrie':
--
test1 = example2 == mkTrie example1
#endif
-- Each node has a size and a sequence of keys followed by an equal-length
-- sequence of corresponding entries. Since we're going to flatten this into
-- a single array then we will need to replace the trie structure with pointers
-- represented as array offsets.
-- Each node is a pair of arrays, one of keys and one of Either value pointer.
-- We need to distinguish values from internal pointers. We use a tag bit:
--
-- | The top bit of a 32-bit word distinguishes internal-node pointers
-- from leaf values inside the flattened trie array.
tagLeaf, tagNode, untag :: Word32 -> Word32
tagLeaf w = w                   -- leaf keys are stored untagged
tagNode w = Bits.setBit w 31    -- mark a key whose entry is a node offset
untag   w = Bits.clearBit w 31  -- strip the tag to recover the raw key
-- So the overall array form of the above trie is:
--
-- offset:  0   1    2    3   4    5    6     7    8     9  10  11    12
-- array:  [ 1 | N1 | 3 ][ 3 | 2, 3, N4 | 512, 2048, 10 ][ 1 | 5 | 4096 ]
--               \__/                           \___/
#ifdef TESTS
example3 :: [Word32]
example3 =
  [1, tagNode 1,
      3,
   3, tagLeaf 2, tagLeaf 3, tagNode 4,
      512,       2048,      10,
   1, tagLeaf 5,
      4096
  ]
-- We get the array form by using flattenTrie:
test2 = example3 == flattenTrie example2
example4 :: IntTrie Int Int
example4 = IntTrie (mkArray example3)
-- Looking up the sole top-level key should list its three completions.
test3 = case lookup example4 [1] of
          Just (Completions [2,3,4]) -> True
          _                          -> False
test1, test2, test3, tests :: Bool
tests = test1 && test2 && test3
#endif
-------------------------------------
-- Toplevel trie array construction
--
-- So constructing the 'IntTrie' as a whole is just a matter of stringing
-- together all the bits
-- | Build an 'IntTrie' from a bunch of (key, value) pairs, where the keys
-- are sequences.
-- NOTE(review): each key sequence is assumed non-empty (see the 'Paths'
-- comment below) — confirm callers never pass an empty key.
construct :: (Ord k, Enum k, Enum v) => [([k], v)] -> IntTrie k v
construct = IntTrie . mkArray . flattenTrie . mkTrie
-- | Pack a list of words into a 0-indexed unboxed array.
-- NOTE(review): with 'Word32' bounds an empty input would wrap the upper
-- bound to maxBound; in practice 'flattenTrie' always yields a non-empty
-- list — confirm.
mkArray :: [Word32] -> A.UArray Word32 Word32
mkArray ws = A.listArray (0, lastIndex) ws
  where
    lastIndex = fromIntegral (length ws) - 1
---------------------------------
-- Looking up in the trie array
--
-- | Result of a lookup: either the entry stored for the full key, or
-- the possible next key components when the key is a proper prefix.
data TrieLookup k v = Entry !v | Completions [k] deriving Show
-- | Follow a key sequence through the flattened array, starting at the
-- root node (offset 0).  Returns 'Nothing' when the key is not present.
lookup :: (Enum k, Enum v) => IntTrie k v -> [k] -> Maybe (TrieLookup k v)
lookup (IntTrie arr) = fmap convertLookup . go 0 . convertKey
  where
    go :: Word32 -> [Word32] -> Maybe (TrieLookup Word32 Word32)
    -- Key exhausted at a node: report its children as completions.
    go nodeOff []     = Just (completions nodeOff)
    -- Try the component as a leaf first, then as an internal node.
    go nodeOff (k:ks) = case search nodeOff (tagLeaf k) of
      Just entryOff
        | null ks   -> Just (entry entryOff)
        | otherwise -> Nothing
      Nothing       -> case search nodeOff (tagNode k) of
        Nothing       -> Nothing
        Just entryOff -> go (arr ! entryOff) ks
    entry       entryOff = Entry (arr ! entryOff)
    -- All (untagged) keys stored in the node at nodeOff.
    completions nodeOff  = Completions [ untag (arr ! keyOff)
                                       | keyOff <- [keysStart..keysEnd] ]
      where
        nodeSize  = arr ! nodeOff
        keysStart = nodeOff + 1
        keysEnd   = nodeOff + nodeSize
    -- Binary search for a (tagged) key in the node at nodeOff; the
    -- result is the offset of the corresponding entry (key offset
    -- shifted by nodeSize, since entries follow the keys).
    search :: Word32 -> Word32 -> Maybe Word32
    search nodeOff key = fmap (+nodeSize) (bsearch keysStart keysEnd key)
      where
        nodeSize  = arr ! nodeOff
        keysStart = nodeOff + 1
        keysEnd   = nodeOff + nodeSize
    bsearch :: Word32 -> Word32 -> Word32 -> Maybe Word32
    bsearch a b key
      | a > b     = Nothing
      | otherwise = case compare key (arr ! mid) of
          LT -> bsearch a (mid-1) key
          EQ -> Just mid
          GT -> bsearch (mid+1) b key
      where mid = (a + b) `div` 2
-- | Convert the caller's key components to the raw 'Word32' keys used
-- in the array.
convertKey :: Enum k => [k] -> [Word32]
convertKey = map (fromIntegral . fromEnum)
-- | Convert a raw lookup result back to the caller's Enum types.
convertLookup :: (Enum k, Enum v) => TrieLookup Word32 Word32
              -> TrieLookup k v
convertLookup (Entry v)        = Entry       (word32ToEnum v)
convertLookup (Completions ks) = Completions (map word32ToEnum ks)
-- | Renamed from @word16ToEnum@: entries have been 32-bit since format
-- version 1 (version 0's 16-bit layout is no longer supported), so the
-- old name was misleading.
word32ToEnum :: Enum n => Word32 -> n
word32ToEnum = toEnum . fromIntegral
-------------------------
-- Intermediate Trie type
--
-- The trie node functor: a leaf holds a value, a node holds a subtrie
-- (abstracted as the functor parameter @x@).
data TrieNodeF k v x = Leaf k v | Node k x  deriving (Eq, Show)
instance Functor (TrieNodeF k v) where
  fmap _ (Leaf k v) = Leaf k v
  fmap f (Node k x) = Node k (f x)
-- The trie functor: a node is a list of child node-functors.
type TrieF k v x = [TrieNodeF k v x]
-- Trie is the fixpoint of the 'TrieF' functor
newtype Trie k v = Trie (TrieF k v (Trie k v))  deriving (Eq, Show)
-- | Anamorphism helpers: grow a trie from a seed using the step
-- function @f@ (each seed produces one node whose children are the
-- returned seeds).
unfoldTrieNode :: (s -> TrieNodeF k v [s]) -> s -> TrieNodeF k v (Trie k v)
unfoldTrieNode f = fmap (unfoldTrie f) . f
unfoldTrie :: (s -> TrieNodeF k v [s]) -> [s] -> Trie k v
unfoldTrie f = Trie . map (unfoldTrieNode f)
{-
trieSize :: Trie k v -> Int
trieSize (Trie ts) = 1 + sum (map trieNodeSize ts)
trieNodeSize :: TrieNodeF k v (Trie k v) -> Int
trieNodeSize (Leaf _ _) = 2
trieNodeSize (Node _ t) = 2 + trieSize t
-}
---------------------------------
-- Building and flattening Tries
--
-- A list of non-empty key-lists paired
type Paths k v = [([k], v)]
-- | Build the intermediate 'Trie' from sorted paths, grouping by the
-- first key component at each level.
-- The lambda pattern in mkGroup is safe: groupBy yields non-empty
-- groups, and every key list is non-empty by the 'Paths' invariant
-- above (TODO confirm callers uphold it).
-- NOTE(review): if one key is a strict prefix of another (a group
-- containing both an empty and a non-empty remainder), the 'case'
-- builds a 'Node' and the prefix's value v0 is silently dropped —
-- confirm callers never rely on prefix keys.
mkTrie :: Ord k => Paths k v -> Trie k v
mkTrie = unfoldTrie (fmap split) . split
       . sortBy (compare `on` fst)
  where
    split :: Eq k => Paths k v -> TrieF k v (Paths k v)
    split = map mkGroup . groupBy ((==) `on` (head . fst))
      where
        mkGroup = \ksvs@((k0:_,v0):_) ->
          case [ (ks, v) | (_:ks, v) <- ksvs, not (null ks) ] of
            []    -> Leaf k0 v0
            ksvs' -> Node k0 ksvs'
-- | Offsets into the (eventual) flattened array.
type Offset  = Int
-- This is a breadth-first traversal. We keep a list of the tries that we are
-- to write out next. Each of these have an offset allocated to them at the
-- time we put them into the list. We keep a running offset so we know where
-- to allocate next.
--
flattenTrie :: (Enum k, Enum v) => Trie k v -> [Word32]
flattenTrie trie = go [trie] (size trie)
  where
    -- One size word plus a key and an entry per child.
    size (Trie tns) = 1 + 2 * length tns
    go :: (Enum k, Enum v) => [Trie k v] -> Offset -> [Word32]
    go []                  _      = []
    go (Trie tnodes:tries) offset = flat ++ go (tries++tries') offset'
      where
        count = length tnodes
        -- Node layout: count, sorted keys, then the matching entries.
        flat  = fromIntegral count : keys ++ values
        (keys, values) = unzip (sortBy (compare `on` fst) keysValues)
        (keysValues, tries', offset') = doNodes offset [] [] tnodes
        -- Allocate child-node offsets left to right; 'off' is the next
        -- free offset, children are queued (in order) for later levels.
        doNodes off kvs ts' []       = (kvs, reverse ts', off)
        doNodes off kvs ts' (tn:tns) = case tn of
          Leaf k v -> doNodes off            (leafKV k v  :kvs)    ts'  tns
          Node k t -> doNodes (off + size t) (nodeKV k off:kvs) (t:ts') tns
        leafKV k v = (tagLeaf (enum2Word32 k), enum2Word32 v)
        nodeKV k o = (tagNode (enum2Word32 k), int2Word32 o)
-- | Narrow an 'Int' to 'Word32'.  Offsets and enum tags in this module
-- are small, so the narrowing is expected to be lossless.
int2Word32 :: Int -> Word32
int2Word32 n = fromIntegral n
-- | Convert any 'Enum' value to its 'Word32' tag.
enum2Word32 :: Enum n => n -> Word32
enum2Word32 x = int2Word32 (fromEnum x)
-------------------------
-- Correctness property
--
#ifdef TESTS
-- | Round-trip property: every (key, value) pair used to 'construct'
-- the trie must come back as an 'Entry' under 'lookup'.  Failures call
-- 'error' (rather than returning False) so the counterexample is shown.
prop :: (Show from, Show to, Enum from, Enum to, Ord from, Eq to) => Paths from to -> Bool
prop paths =
  flip all paths $ \(key, value) ->
    case lookup trie key of
      Just (Entry value') | value' == value
                         -> True
      Just (Entry value') -> error $ "IntTrie: " ++ show (key, value, value')
      Nothing             -> error $ "IntTrie: didn't find " ++ show key
      Just (Completions xs) -> error $ "IntTrie: " ++ show xs
  where
    trie = construct paths
--TODO: missing data abstraction property
#endif
| mpickering/hackage-server | Data/IntTrie.hs | bsd-3-clause | 10,011 | 0 | 15 | 2,731 | 3,074 | 1,661 | 1,413 | 113 | 6 |
module Main where
import Database.HaskellDB
import Database.HaskellDB.FieldType
import Database.HaskellDB.DBSpec
import System.Environment (getArgs)
-- | Example specification: database \"ctest\" with two two-column tables.
test = DBInfo {dbname = "ctest", opts = testopts, tbls = [testtbl1,testtbl2]}
-- | Options: plain strings rather than bounded strings by default.
testopts = DBOptions {useBString = False}
testtbl1 = TInfo {tname = "ctesttbl1", cols = [testcol11,testcol12]}
testtbl2 = TInfo {tname = "ctesttbl2", cols = [testcol21,testcol22]}
-- Column descriptors: (type, nullable).
testcol11 = CInfo {cname = "ctestcol11", descr = (IntT,False)}
testcol12 = CInfo {cname = "ctestcol12", descr = (BStrT 8,True)}
testcol21 = CInfo {cname = "ctestcol21", descr = (BStrT 6,False)}
testcol22 = CInfo {cname = "ctestcol22", descr = (IntT,True)}
-- | Create the example database described by 'test'.  The first
-- command-line argument names the driver; any remaining arguments are
-- passed through to it.  Previously this used the partial 'head'/'tail'
-- and crashed with \"Prelude.head: empty list\" when run with no
-- arguments; now it fails with a usage message instead.
main :: IO ()
main = do
  args <- getArgs
  case args of
    (driver:driverArgs) ->
      genericConnect driver driverArgs (\conn -> dbSpecToDatabase conn test)
    [] -> error "usage: dbspec <driver> [<driver-option>...]"
| m4dc4p/haskelldb | test/old/dbspec.hs | bsd-3-clause | 802 | 0 | 12 | 136 | 288 | 173 | 115 | 17 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Network.Pubnub
import Network.Pubnub.Types
import Data.Aeson
import Control.Concurrent
import qualified Data.Text as T
-- | Demo against the public \"demo\" keys: subscribe to the
-- @hello_world@ channel, publish one message, then fetch the two most
-- recent history entries.
main :: IO ()
main = do
  let pn = defaultPN{channels=["hello_world"], sub_key="demo", pub_key="demo"}
  -- Subscribe first so the published message is received by 'output'.
  _ <- subscribe pn defaultSubscribeOptions{ onMsg = output
                                           , onConnect = putStrLn "Connected..." }
  -- Give the subscription a second to establish before publishing.
  _ <- threadDelay 1000000
  hello <- publish pn "hello_world" ("hello" :: T.Text)
  print hello
  hello2 <- history pn "hello_world" [ Reverse True
                                     , Count 2] :: IO (Maybe (History Value))
  print hello2
  return ()
-- | Subscription callback: dump each received message (or 'Nothing')
-- to stdout.
output :: Maybe Value -> IO ()
output msg = print msg
| pubnub/haskell | examples/HelloWorld/Main.hs | mit | 715 | 0 | 12 | 177 | 228 | 119 | 109 | 21 | 1 |
-- (c) The University of Glasgow 2006
-- (c) The GRASP/AQUA Project, Glasgow University, 1992-1998
--
-- The @Class@ datatype
{-# LANGUAGE CPP, DeriveDataTypeable #-}
module ETA.Types.Class (
Class,
ClassOpItem, DefMeth (..),
ClassATItem(..),
ClassMinimalDef,
defMethSpecOfDefMeth,
FunDep, pprFundeps, pprFunDep,
mkClass, classTyVars, classArity,
classKey, className, classATs, classATItems, classTyCon, classMethods,
classOpItems, classBigSig, classExtraBigSig, classTvsFds, classSCTheta,
classAllSelIds, classSCSelId, classMinimalDef
) where
#include "HsVersions.h"
import {-# SOURCE #-} ETA.Types.TyCon ( TyCon, tyConName, tyConUnique )
import {-# SOURCE #-} ETA.Types.TypeRep ( Type, PredType )
import ETA.BasicTypes.Var
import ETA.BasicTypes.Name
import ETA.BasicTypes.BasicTypes
import ETA.BasicTypes.Unique
import ETA.Utils.Util
import ETA.BasicTypes.SrcLoc
import ETA.Utils.Outputable
import ETA.Utils.FastString
import ETA.Utils.BooleanFormula (BooleanFormula)
import Data.Typeable (Typeable)
import qualified Data.Data as Data
{-
************************************************************************
* *
\subsection[Class-basic]{@Class@: basic definition}
* *
************************************************************************
A @Class@ corresponds to a Greek kappa in the static semantics:
-}
-- | A type class: kept in sync with its dictionary 'TyCon', whose name,
-- unique and type variables are cached here.
data Class
  = Class {
        classTyCon :: TyCon,    -- The data type constructor for
                                -- dictionaries of this class
                                -- See Note [ATyCon for classes] in TypeRep
        className :: Name,              -- Just the cached name of the TyCon
        classKey  :: Unique,            -- Cached unique of TyCon
        classTyVars  :: [TyVar],        -- The class kind and type variables;
                                        --    identical to those of the TyCon
        classFunDeps :: [FunDep TyVar], -- The functional dependencies
        -- Superclasses: eg: (F a ~ b, F b ~ G a, Eq a, Show b)
        -- We need value-level selectors for both the dictionary
        -- superclasses and the equality superclasses
        classSCTheta :: [PredType],     -- Immediate superclasses,
        classSCSels  :: [Id],           -- Selector functions to extract the
                                        --   superclasses from a
                                        --   dictionary of this class
        -- Associated types
        classATStuff :: [ClassATItem],  -- Associated type families
        -- Class operations (methods, not superclasses)
        classOpStuff :: [ClassOpItem],  -- Ordered by tag
        -- Minimal complete definition
        classMinimalDef :: ClassMinimalDef
     }
  deriving Typeable
-- | e.g.
--
-- >  class C a b c | a b -> c, a c -> b where...
--
-- Here fun-deps are [([a,b],[c]), ([a,c],[b])]
--
--  - 'ApiAnnotation.AnnKeywordId' : 'ApiAnnotation.AnnRarrow'',
-- For details on above see note [Api annotations] in ApiAnnotation
type FunDep a = ([a],[a])
-- | A class method: its selector Id (which contains the unfolding)
-- paired with its default-method info.
type ClassOpItem = (Id, DefMeth)
        -- Selector function; contains unfolding
        -- Default-method info
data DefMeth = NoDefMeth                -- No default method
             | DefMeth Name             -- A polymorphic default method
             | GenDefMeth Name          -- A generic default method
             deriving Eq
-- | An associated type family of the class, with its optional default.
data ClassATItem
  = ATI TyCon         -- See Note [Associated type tyvar names]
        (Maybe (Type, SrcSpan))
                      -- Default associated type (if any) from this template
                      -- Note [Associated type defaults]
type ClassMinimalDef = BooleanFormula Name -- Required methods
-- | Convert a 'DefMeth' to the corresponding 'DefMethSpec', discarding
-- the method 'Name' carried by the 'DefMeth' / 'GenDefMeth'
-- constructors.  (The original header comment had the direction of the
-- conversion reversed.)
defMethSpecOfDefMeth :: DefMeth -> DefMethSpec
defMethSpecOfDefMeth NoDefMeth      = NoDM
defMethSpecOfDefMeth (DefMeth _)    = VanillaDM
defMethSpecOfDefMeth (GenDefMeth _) = GenericDM
{-
Note [Associated type defaults]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The following is an example of associated type defaults:
class C a where
data D a r
type F x a b :: *
type F p q r = (p,q)->r -- Default
Note that
* The TyCons for the associated types *share type variables* with the
class, so that we can tell which argument positions should be
instantiated in an instance decl. (The first for 'D', the second
for 'F'.)
* We can have default definitions only for *type* families,
not data families
* In the default decl, the "patterns" should all be type variables,
but (in the source language) they don't need to be the same as in
the 'type' decl signature or the class. It's more like a
free-standing 'type instance' declaration.
* HOWEVER, in the internal ClassATItem we rename the RHS to match the
tyConTyVars of the family TyCon. So in the example above we'd get
a ClassATItem of
ATI F ((x,a) -> b)
So the tyConTyVars of the family TyCon bind the free vars of
the default Type rhs
The @mkClass@ function fills in the indirect superclasses.
The SrcSpan is for the entire original declaration.
-}
-- | Build a 'Class' from its constituent pieces.  The class's unique
-- key and name are taken from the supplied 'TyCon', which must be the
-- class's own 'TyCon'; the caller provides the type variables,
-- functional dependencies, superclass information, associated types,
-- method items and minimal complete definition.
mkClass :: [TyVar]
        -> [([TyVar], [TyVar])]
        -> [PredType] -> [Id]
        -> [ClassATItem]
        -> [ClassOpItem]
        -> ClassMinimalDef
        -> TyCon
        -> Class
mkClass tvs fun_deps sc_theta sc_sels ats ops minimal tc
  = Class { classKey        = tyConUnique tc
          , className       = tyConName tc
          , classTyCon      = tc
          , classTyVars     = tvs
          , classFunDeps    = fun_deps
          , classSCTheta    = sc_theta
          , classSCSels     = sc_sels
          , classATStuff    = ats
          , classOpStuff    = ops
          , classMinimalDef = minimal }
{-
Note [Associated type tyvar names]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The TyCon of an associated type should use the same variable names as its
parent class. Thus
class C a b where
type F b x a :: *
We make F use the same Name for 'a' as C does, and similarly 'b'.
The reason for this is when checking instances it's easier to match
them up, to ensure they match. Eg
instance C Int [d] where
type F [d] x Int = ....
we should make sure that the first and third args match the instance
header.
Having the same variables for class and tycon is also used in checkValidRoles
(in TcTyClsDecls) when checking a class's roles.
************************************************************************
* *
\subsection[Class-selectors]{@Class@: simple selectors}
* *
************************************************************************
The rest of these functions are just simple selectors.
-}
-- | The arity of a class: the number of its type variables.
-- (Could be memoised.)
classArity :: Class -> Arity
classArity = length . classTyVars
-- | All selector 'Id's of the class: the superclass-dictionary
-- selectors followed by the method selectors.
classAllSelIds :: Class -> [Id]
classAllSelIds cls = classSCSels cls ++ classMethods cls
-- | Get the n'th superclass selector 'Id', where n is 0-indexed and
-- counts *all* superclasses, including equality superclasses.
classSCSelId :: Class -> Int -> Id
-- NOTE(review): ASSERT is a CPP macro from HsVersions.h; the bounds
-- check is presumably absent in non-assertion builds, leaving the
-- (!!) below partial for out-of-range n -- confirm before relying
-- on it for validation.
classSCSelId (Class { classSCSels = sc_sels }) n
  = ASSERT( n >= 0 && n < length sc_sels )
    sc_sels !! n
-- | The method selector 'Id's of the class, in tag order.
classMethods :: Class -> [Id]
classMethods (Class {classOpStuff = op_stuff}) = map fst op_stuff
-- | The method selectors paired with their default-method info,
-- ordered by tag.
classOpItems :: Class -> [ClassOpItem]
classOpItems = classOpStuff

-- | The 'TyCon's of the class's associated type families.
classATs :: Class -> [TyCon]
classATs cls = map (\(ATI tc _) -> tc) (classATStuff cls)

-- | The associated-type items: each family 'TyCon' with its optional
-- default instance.
classATItems :: Class -> [ClassATItem]
classATItems = classATStuff
-- | The class's type variables together with its functional
-- dependencies.
classTvsFds :: Class -> ([TyVar], [FunDep TyVar])
classTvsFds cls = (classTyVars cls, classFunDeps cls)
-- | The \"big signature\" of the class: type variables, superclass
-- theta, superclass selectors and method items.
classBigSig :: Class -> ([TyVar], [PredType], [Id], [ClassOpItem])
classBigSig (Class { classTyVars = tvs, classSCTheta = theta
                   , classSCSels = sels, classOpStuff = ops })
  = (tvs, theta, sels, ops)
-- | Like 'classBigSig', but additionally exposing the functional
-- dependencies and the associated-type items.
classExtraBigSig :: Class -> ([TyVar], [FunDep TyVar], [PredType], [Id], [ClassATItem], [ClassOpItem])
classExtraBigSig (Class { classTyVars = tvs, classFunDeps = fds
                        , classSCTheta = theta, classSCSels = sels
                        , classATStuff = ats, classOpStuff = ops })
  = (tvs, fds, theta, sels, ats, ops)
{-
************************************************************************
* *
\subsection[Class-instances]{Instance declarations for @Class@}
* *
************************************************************************
We compare @Classes@ by their keys (which include @Uniques@).
-}
-- Classes are compared by their keys (which contain Uniques).
instance Eq Class where
    a == b = classKey a == classKey b
    a /= b = classKey a /= classKey b
-- Ordering, like equality, is by key.
instance Ord Class where
    compare a b = classKey a `compare` classKey b
    a <= b = classKey a <= classKey b
    a <  b = classKey a <  classKey b
    a >= b = classKey a >= classKey b
    a >  b = classKey a >  classKey b
instance Uniquable Class where
    getUnique = classKey

instance NamedThing Class where
    getName = className

instance Outputable Class where
    ppr = ppr . getName
instance Outputable DefMeth where
    ppr NoDefMeth = empty -- No default method
    ppr (DefMeth n) = ptext (sLit "Default method") <+> ppr n
    ppr (GenDefMeth n) = ptext (sLit "Generic default method") <+> ppr n
-- | Pretty-print a list of functional dependencies in class-declaration
-- syntax (preceded by a vertical bar); prints nothing for an empty list.
pprFundeps :: Outputable a => [FunDep a] -> SDoc
pprFundeps deps
  | null deps = empty
  | otherwise = hsep (ptext (sLit "|") : punctuate comma (map pprFunDep deps))
-- | Pretty-print a single functional dependency as @lhs -> rhs@.
pprFunDep :: Outputable a => FunDep a -> SDoc
pprFunDep (lhs, rhs) = hsep [interppSP lhs, ptext (sLit "->"), interppSP rhs]
-- 'Class' is opaque to generic programming: it is presented as an
-- abstract constructor, and attempting to rebuild one via 'gunfold'
-- is an error.
instance Data.Data Class where
    -- don't traverse?
    toConstr _ = abstractConstr "Class"
    gunfold _ _ = error "gunfold"
    dataTypeOf _ = mkNoRepType "Class"
| alexander-at-github/eta | compiler/ETA/Types/Class.hs | bsd-3-clause | 10,352 | 0 | 12 | 2,898 | 1,623 | 935 | 688 | 133 | 3 |
module Network.Wai.Frontend.MonadCGI
( cgiToApp
, cgiToAppGeneric
) where
import Network.Wai
import Network.CGI.Monad
import Network.CGI.Protocol
import Network.HTTP.Types (Status (..))
import Control.Monad.IO.Class (liftIO)
import Data.CaseInsensitive (original)
import qualified Data.Map as Map
import qualified Data.ByteString.Lazy as BS
import qualified Data.ByteString.Char8 as S8
import Control.Arrow (first)
import Data.Char (toUpper)
import Data.String (fromString)
-- | Parse a 'String' with 'reads', returning the supplied fallback
-- value when no parse succeeds.
safeRead :: Read a => a -> String -> a
safeRead fallback str
  | ((val, _):_) <- reads str = val
  | otherwise                 = fallback
-- | Run a 'CGI' computation as a WAI 'Application'.  Since the
-- computation already runs over 'IO' (witnessed by passing 'id'),
-- no extra lifting is needed.
cgiToApp :: CGI CGIResult -> Application
cgiToApp = cgiToAppGeneric id
-- | Run a CGI computation in an arbitrary monad @m@ as a WAI
-- 'Application'.  The first argument evaluates the monadic result
-- (headers plus CGI output) down to 'IO'.
cgiToAppGeneric :: Monad m
                => (m (Headers, CGIResult) -> IO (Headers, CGIResult))
                -> CGIT m CGIResult
                -> Application
cgiToAppGeneric toIO cgi env sendResponse = do
    input <- lazyRequestBody env
    -- CGI variable environment: the request's HTTP headers renamed to
    -- HTTP_* form, plus the standard CGI variables (PATH_INFO etc.).
    let vars = map (first fixVarName . go) (requestHeaders env)
               ++ getCgiVars env
        (inputs, body') = decodeInput vars input
        req = CGIRequest
                { cgiVars = Map.fromList $ vars
                , cgiInputs = inputs
                , cgiRequestBody = body'
                }
    (headers'', output') <- liftIO $ toIO $ runCGIT cgi req
    let output = case output' of
                    CGIOutput bs -> bs
                    CGINothing -> BS.empty
    let headers' = map (\(HeaderName x, y) ->
                            (fromString x, S8.pack y)) headers''
    -- CGI carries the response code in a "Status" pseudo-header;
    -- default to 200 when it is absent or unparsable.
    let status' = case lookup (fromString "Status") headers' of
                    Nothing -> 200
                    Just s -> safeRead 200 $ S8.unpack s
    sendResponse $ responseLBS (Status status' S8.empty) headers' output
  where
    -- Convert a (case-insensitive header name, value) pair to Strings.
    go (x, y) = (S8.unpack $ original x, S8.unpack y)
-- | Turn an HTTP header name into its CGI environment-variable form:
-- prefix with @HTTP_@, upper-case letters, and map dashes to
-- underscores (e.g. @content-type@ becomes @HTTP_CONTENT_TYPE@).
fixVarName :: String -> String
fixVarName name = "HTTP_" ++ map fixVarNameChar name

-- | Transform one header-name character for CGI variable form.
fixVarNameChar :: Char -> Char
fixVarNameChar c
  | c == '-'  = '_'
  | otherwise = toUpper c
-- | Synthesise the standard CGI environment variables from a WAI
-- 'Request': PATH_INFO, REQUEST_METHOD and QUERY_STRING (with the
-- leading question mark stripped).
getCgiVars :: Request -> [(String, String)]
getCgiVars e =
    [ ("PATH_INFO", S8.unpack $ rawPathInfo e)
      -- requestMethod is a ByteString; using 'show' here would wrap
      -- the method in quotes (e.g. "\"GET\""), so unpack it instead.
    , ("REQUEST_METHOD", S8.unpack $ requestMethod e)
    , ("QUERY_STRING",
       case S8.unpack $ rawQueryString e of
        '?':rest -> rest
        x -> x)
    ]
| creichert/wai | wai-frontend-monadcgi/Network/Wai/Frontend/MonadCGI.hs | mit | 2,266 | 0 | 15 | 666 | 735 | 396 | 339 | 58 | 3 |
module CoverageTable where
import Control.Monad (liftM,forM_)
import Data.List
import Database.MongoDB
import Queries.Coverage
import Queries.Features
-- | Query feature-coverage data from a local MongoDB instance and
-- print (to stdout) a LaTeX longtable showing which of the selected
-- Haskell-based 101companies implementations cover which features.
main = do
  pipe <- runIOE $ connect (host "127.0.0.1")
  --features <- liftM reverse $ queryFeatures pipe
  -- Fixed feature list (columns of the table), in display order.
  let features = ["Company","Cut","Total","Access control","Concurrency"]
        ++ ["Distribution", "Fault tolerance", "Interaction", "Logging"]
        ++ ["Mapping","Parallelism", "Parsing", "Persistence", "Serialization"]
        ++ ["Visualization", "Depth", "Mentoring", "foo"]
  coverageAll <- liftM reverse $ queryCoverage pipe
  -- Keep only implementations with a non-empty feature list, then
  -- restrict to the Haskell implementations listed in hsTitles.
  let coverageU = filter (not.null.snd) coverageAll
  let coverage = filter (((flip $ elem) hsTitles).fst) coverageU
  -- covNum: per feature, the number of implementations covering it.
  let covNum = [sum [if elem f fimpl then 1 else 0 | (_, fimpl) <- coverage] | f <- features]
  let baseURL = "http://101companies.org/index.php/"
  -- head
  putStrLn $ "\\tabcolsep 1mm"
  putStrLn $ "\\begin{longtable}"
  putStrLn $ "{c|c|c|c||c|c|c|c|c|c|c|c|c|c|c|c||c|c|c|}\\hline"
  putStrLn $ "\\caption[Coverage of the feature model]{Coverage of the feature model} \\\\"
  putStrLn $ "\\cline{2-19} & "
    ++ " \\multicolumn{3}{c|}{\\textbf{\\href{"++ baseURL ++ "Category:101basics}{basics}}} & "
    ++ " \\multicolumn{12}{|c|}{\\textbf{\\href{"++ baseURL ++ "Category:101capabilities}{capabilities}}} & "
    ++ " \\multicolumn{3}{|c|}{\\textbf{\\href{"++ baseURL ++ "Category:101extras}{extras}}}"
    ++ "\\\\ \\cline{2-19}"
  -- Column headers: one rotated, hyperlinked label per feature.
  putStrLn $ ""
    ++ " & "
    ++ (concat $ intersperse " & " $ map
        (\(f,c) -> "\\textbf{\\begin{sideways}\\href{" ++ baseURL ++ "101feature:" ++ f ++ "}{" ++ f++ "}\\end{sideways}}")
        (zip features covNum))
    ++ "\\\\ \\hline"
  putStrLn "\\endfirsthead"
  putStrLn $ "\\multicolumn{" ++ (show $ length features) ++ "}{c}%"
  putStrLn "{{\\bfseries \\tablename\\ \\thetable{} -- continued from previous page}} \\\\"
  putStrLn $ "\\hline"
    ++ " & "
    ++ (concat $ intersperse " & " $ map
        (\(f,c) -> "\\multicolumn{1}{|c|}{\\textbf{\\begin{sideways}" ++ f ++ "\\end{sideways}}}")
        (zip features covNum))
    ++ "\\\\ \\hline"
  putStrLn "\\endhead"
  putStrLn $ "\\hline \\multicolumn{"++ (show $ length features) ++ "}{|r|}{{Continued on next page}} \\\\ \\hline"
  putStrLn "\\endfoot"
  putStrLn "\\hline \\hline"
  putStrLn "\\endlastfoot"
  -- feature coverage
  -- Body: one row per implementation, a bullet per covered feature.
  forM_ coverage $ \(title, implFs) -> do
    let line = "\\multicolumn{1}{|r|}{\\hyperlink{impl" ++ title ++ "}{"++ title ++ "}}" ++
               " " ++
               (concat $ map (\f -> symbol $ elem f implFs) features)
    putStrLn $ line ++ "\\\\\\hline"
  putStrLn "\\hline\\hline"
  -- Footer: per-feature coverage counts.
  putStrLn "\\multicolumn{1}{|r|}{\\textbf{Coverage}}"
  putStrLn $ concat $
    map (\n -> " &\\textbf{" ++ show n ++ "}") covNum
  putStrLn "\\\\\\hline"
  putStrLn "\\end{longtable}"
-- | Render one table cell: a bullet when the feature is implemented,
-- an empty cell otherwise.
symbol :: Bool -> String
symbol implemented = if implemented then "&$\\bullet$ " else "& "
hsTitles = ["haskellLogger","haskellParser", "haskellConcurrent", "dph", "hdbc", "haskellDB", "hxt", "hxtPickler", "wxHaskell", "haskellCGI", "happstack"] | 101companies/101dev | tools/mongo2Tex/CoverageTable.hs | gpl-3.0 | 3,425 | 4 | 21 | 918 | 806 | 410 | 396 | 61 | 2 |
{-
(c) The GRASP/AQUA Project, Glasgow University, 1993-1998
\section[Specialise]{Stamping out overloading, and (optionally) polymorphism}
-}
{-# LANGUAGE CPP #-}
module Specialise ( specProgram, specUnfolding ) where
#include "HsVersions.h"
import GhcPrelude
import Id
import TcType hiding( substTy )
import Type hiding( substTy, extendTvSubstList )
import Module( Module, HasModule(..) )
import Coercion( Coercion )
import CoreMonad
import qualified CoreSubst
import CoreUnfold
import Var ( isLocalVar )
import VarSet
import VarEnv
import CoreSyn
import Rules
import CoreOpt ( collectBindersPushingCo )
import CoreUtils ( exprIsTrivial, applyTypeToArgs, mkCast )
import CoreFVs
import FV ( InterestingVarFun )
import CoreArity ( etaExpandToJoinPointRule )
import UniqSupply
import Name
import MkId ( voidArgId, voidPrimId )
import Maybes ( catMaybes, isJust )
import MonadUtils ( foldlM )
import BasicTypes
import HscTypes
import Bag
import DynFlags
import Util
import Outputable
import FastString
import State
import UniqDFM
import Control.Monad
import qualified Control.Monad.Fail as MonadFail
{-
************************************************************************
* *
\subsection[notes-Specialise]{Implementation notes [SLPJ, Aug 18 1993]}
* *
************************************************************************
These notes describe how we implement specialisation to eliminate
overloading.
The specialisation pass works on Core
syntax, complete with all the explicit dictionary application,
abstraction and construction as added by the type checker. The
existing type checker remains largely as it is.
One important thought: the {\em types} passed to an overloaded
function, and the {\em dictionaries} passed are mutually redundant.
If the same function is applied to the same type(s) then it is sure to
be applied to the same dictionary(s)---or rather to the same {\em
values}. (The arguments might look different but they will evaluate
to the same value.)
Second important thought: we know that we can make progress by
treating dictionary arguments as static and worth specialising on. So
we can do without binding-time analysis, and instead specialise on
dictionary arguments and no others.
The basic idea
~~~~~~~~~~~~~~
Suppose we have
let f = <f_rhs>
in <body>
and suppose f is overloaded.
STEP 1: CALL-INSTANCE COLLECTION
We traverse <body>, accumulating all applications of f to types and
dictionaries.
(Might there be partial applications, to just some of its types and
dictionaries? In principle yes, but in practice the type checker only
builds applications of f to all its types and dictionaries, so partial
applications could only arise as a result of transformation, and even
then I think it's unlikely. In any case, we simply don't accumulate such
partial applications.)
STEP 2: EQUIVALENCES
So now we have a collection of calls to f:
f t1 t2 d1 d2
f t3 t4 d3 d4
...
Notice that f may take several type arguments. To avoid ambiguity, we
say that f is called at type t1/t2 and t3/t4.
We take equivalence classes using equality of the *types* (ignoring
the dictionary args, which as mentioned previously are redundant).
STEP 3: SPECIALISATION
For each equivalence class, choose a representative (f t1 t2 d1 d2),
and create a local instance of f, defined thus:
f@t1/t2 = <f_rhs> t1 t2 d1 d2
f_rhs presumably has some big lambdas and dictionary lambdas, so lots
of simplification will now result. However we don't actually *do* that
simplification. Rather, we leave it for the simplifier to do. If we
*did* do it, though, we'd get more call instances from the specialised
RHS. We can work out what they are by instantiating the call-instance
set from f's RHS with the types t1, t2.
Add this new id to f's IdInfo, to record that f has a specialised version.
Before doing any of this, check that f's IdInfo doesn't already
tell us about an existing instance of f at the required type/s.
(This might happen if specialisation was applied more than once, or
it might arise from user SPECIALIZE pragmas.)
Recursion
~~~~~~~~~
Wait a minute! What if f is recursive? Then we can't just plug in
its right-hand side, can we?
But it's ok. The type checker *always* creates non-recursive definitions
for overloaded recursive functions. For example:
f x = f (x+x) -- Yes I know its silly
becomes
f a (d::Num a) = let p = +.sel a d
in
letrec fl (y::a) = fl (p y y)
in
fl
We still have recursion for non-overloaded functions which we
specialise, but the recursive call should get specialised to the
same recursive version.
Polymorphism 1
~~~~~~~~~~~~~~
All this is crystal clear when the function is applied to *constant
types*; that is, types which have no type variables inside. But what if
it is applied to non-constant types? Suppose we find a call of f at type
t1/t2. There are two possibilities:
(a) The free type variables of t1, t2 are in scope at the definition point
of f. In this case there's no problem, we proceed just as before. A common
example is as follows. Here's the Haskell:
g y = let f x = x+x
in f y + f y
After typechecking we have
g a (d::Num a) (y::a) = let f b (d'::Num b) (x::b) = +.sel b d' x x
in +.sel a d (f a d y) (f a d y)
Notice that the call to f is at type type "a"; a non-constant type.
Both calls to f are at the same type, so we can specialise to give:
g a (d::Num a) (y::a) = let f@a (x::a) = +.sel a d x x
in +.sel a d (f@a y) (f@a y)
(b) The other case is when the type variables in the instance types
are *not* in scope at the definition point of f. The example we are
working with above is a good case. There are two instances of (+.sel a d),
but "a" is not in scope at the definition of +.sel. Can we do anything?
Yes, we can "common them up", a sort of limited common sub-expression deal.
This would give:
g a (d::Num a) (y::a) = let +.sel@a = +.sel a d
f@a (x::a) = +.sel@a x x
in +.sel@a (f@a y) (f@a y)
This can save work, and can't be spotted by the type checker, because
the two instances of +.sel weren't originally at the same type.
Further notes on (b)
* There are quite a few variations here. For example, the defn of
  +.sel could be floated outside the \y, to attempt to gain laziness.
It certainly mustn't be floated outside the \d because the d has to
be in scope too.
* We don't want to inline f_rhs in this case, because
that will duplicate code. Just commoning up the call is the point.
* Nothing gets added to +.sel's IdInfo.
* Don't bother unless the equivalence class has more than one item!
Not clear whether this is all worth it. It is of course OK to
simply discard call-instances when passing a big lambda.
Polymorphism 2 -- Overloading
~~~~~~~~~~~~~~
Consider a function whose most general type is
f :: forall a b. Ord a => [a] -> b -> b
There is really no point in making a version of g at Int/Int and another
at Int/Bool, because it's only instantiating the type variable "a" which
buys us any efficiency. Since g is completely polymorphic in b there
ain't much point in making separate versions of g for the different
b types.
That suggests that we should identify which of g's type variables
are constrained (like "a") and which are unconstrained (like "b").
Then when taking equivalence classes in STEP 2, we ignore the type args
corresponding to unconstrained type variable. In STEP 3 we make
polymorphic versions. Thus:
f@t1/ = /\b -> <f_rhs> t1 b d1 d2
We do this.
Dictionary floating
~~~~~~~~~~~~~~~~~~~
Consider this
f a (d::Num a) = let g = ...
in
...(let d1::Ord a = Num.Ord.sel a d in g a d1)...
Here, g is only called at one type, but the dictionary isn't in scope at the
definition point for g. Usually the type checker would build a
definition for d1 which enclosed g, but the transformation system
might have moved d1's defn inward. Solution: float dictionary bindings
outwards along with call instances.
Consider
f x = let g p q = p==q
h r s = (r+s, g r s)
in
h x x
Before specialisation, leaving out type abstractions we have
f df x = let g :: Eq a => a -> a -> Bool
g dg p q = == dg p q
h :: Num a => a -> a -> (a, Bool)
h dh r s = let deq = eqFromNum dh
in (+ dh r s, g deq r s)
in
h df x x
After specialising h we get a specialised version of h, like this:
h' r s = let deq = eqFromNum df
in (+ df r s, g deq r s)
But we can't naively make an instance for g from this, because deq is not in scope
at the defn of g. Instead, we have to float out the (new) defn of deq
to widen its scope. Notice that this floating can't be done in advance -- it only
shows up when specialisation is done.
User SPECIALIZE pragmas
~~~~~~~~~~~~~~~~~~~~~~~
Specialisation pragmas can be digested by the type checker, and implemented
by adding extra definitions along with that of f, in the same way as before
f@t1/t2 = <f_rhs> t1 t2 d1 d2
Indeed the pragmas *have* to be dealt with by the type checker, because
only it knows how to build the dictionaries d1 and d2! For example
g :: Ord a => [a] -> [a]
{-# SPECIALIZE f :: [Tree Int] -> [Tree Int] #-}
Here, the specialised version of g is an application of g's rhs to the
Ord dictionary for (Tree Int), which only the type checker can conjure
up. There might not even *be* one, if (Tree Int) is not an instance of
Ord! (All the other specialisation has suitable dictionaries to hand
from actual calls.)
Problem. The type checker doesn't have to hand a convenient <f_rhs>, because
it is buried in a complex (as-yet-un-desugared) binding group.
Maybe we should say
f@t1/t2 = f* t1 t2 d1 d2
where f* is the Id f with an IdInfo which says "inline me regardless!".
Indeed all the specialisation could be done in this way.
That in turn means that the simplifier has to be prepared to inline absolutely
any in-scope let-bound thing.
Again, the pragma should permit polymorphism in unconstrained variables:
h :: Ord a => [a] -> b -> b
{-# SPECIALIZE h :: [Int] -> b -> b #-}
We *insist* that all overloaded type variables are specialised to ground types,
(and hence there can be no context inside a SPECIALIZE pragma).
We *permit* unconstrained type variables to be specialised to
- a ground type
- or left as a polymorphic type variable
but nothing in between. So
{-# SPECIALIZE h :: [Int] -> [c] -> [c] #-}
is *illegal*. (It can be handled, but it adds complication, and gains the
programmer nothing.)
SPECIALISING INSTANCE DECLARATIONS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
instance Foo a => Foo [a] where
...
{-# SPECIALIZE instance Foo [Int] #-}
The original instance decl creates a dictionary-function
definition:
dfun.Foo.List :: forall a. Foo a -> Foo [a]
The SPECIALIZE pragma just makes a specialised copy, just as for
ordinary function definitions:
dfun.Foo.List@Int :: Foo [Int]
dfun.Foo.List@Int = dfun.Foo.List Int dFooInt
The information about what instance of the dfun exist gets added to
the dfun's IdInfo in the same way as a user-defined function too.
Automatic instance decl specialisation?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Can instance decls be specialised automatically? It's tricky.
We could collect call-instance information for each dfun, but
then when we specialised their bodies we'd get new call-instances
for ordinary functions; and when we specialised their bodies, we might get
new call-instances of the dfuns, and so on. This all arises because of
the unrestricted mutual recursion between instance decls and value decls.
Still, there's no actual problem; it just means that we may not do all
the specialisation we could theoretically do.
Furthermore, instance decls are usually exported and used non-locally,
so we'll want to compile enough to get those specialisations done.
Lastly, there's no such thing as a local instance decl, so we can
survive solely by spitting out *usage* information, and then reading that
back in as a pragma when next compiling the file. So for now,
we only specialise instance decls in response to pragmas.
SPITTING OUT USAGE INFORMATION
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To spit out usage information we need to traverse the code collecting
call-instance information for all imported (non-prelude?) functions
and data types. Then we equivalence-class it and spit it out.
This is done at the top-level when all the call instances which escape
must be for imported functions and data types.
*** Not currently done ***
Partial specialisation by pragmas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What about partial specialisation:
k :: (Ord a, Eq b) => [a] -> b -> b -> [a]
{-# SPECIALIZE k :: Eq b => [Int] -> b -> b -> [a] #-}
or even
{-# SPECIALIZE k :: Eq b => [Int] -> [b] -> [b] -> [a] #-}
Seems quite reasonable. Similar things could be done with instance decls:
instance (Foo a, Foo b) => Foo (a,b) where
...
{-# SPECIALIZE instance Foo a => Foo (a,Int) #-}
{-# SPECIALIZE instance Foo b => Foo (Int,b) #-}
Ho hum. Things are complex enough without this. I pass.
Requirements for the simplifier
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The simplifier has to be able to take advantage of the specialisation.
* When the simplifier finds an application of a polymorphic f, it looks in
f's IdInfo in case there is a suitable instance to call instead. This converts
f t1 t2 d1 d2 ===> f_t1_t2
Note that the dictionaries get eaten up too!
* Dictionary selection operations on constant dictionaries must be
short-circuited:
+.sel Int d ===> +Int
The obvious way to do this is in the same way as other specialised
calls: +.sel has inside it some IdInfo which tells that if it's applied
to the type Int then it should eat a dictionary and transform to +Int.
In short, dictionary selectors need IdInfo inside them for constant
methods.
* Exactly the same applies if a superclass dictionary is being
extracted:
Eq.sel Int d ===> dEqInt
* Something similar applies to dictionary construction too. Suppose
dfun.Eq.List is the function taking a dictionary for (Eq a) to
one for (Eq [a]). Then we want
dfun.Eq.List Int d ===> dEq.List_Int
Where does the Eq [Int] dictionary come from? It is built in
response to a SPECIALIZE pragma on the Eq [a] instance decl.
In short, dfun Ids need IdInfo with a specialisation for each
constant instance of their instance declaration.
All this uses a single mechanism: the SpecEnv inside an Id
What does the specialisation IdInfo look like?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The SpecEnv of an Id maps a list of types (the template) to an expression
[Type] |-> Expr
For example, if f has this RuleInfo:
[Int, a] -> \d:Ord Int. f' a
it means that we can replace the call
f Int t ===> (\d. f' t)
This chucks one dictionary away and proceeds with the
specialised version of f, namely f'.
What can't be done this way?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is no way, post-typechecker, to get a dictionary for (say)
Eq a from a dictionary for Eq [a]. So if we find
==.sel [t] d
we can't transform to
eqList (==.sel t d')
where
eqList :: (a->a->Bool) -> [a] -> [a] -> Bool
Of course, we currently have no way to automatically derive
eqList, nor to connect it to the Eq [a] instance decl, but you
can imagine that it might somehow be possible. Taking advantage
of this is permanently ruled out.
Still, this is no great hardship, because we intend to eliminate
overloading altogether anyway!
A note about non-tyvar dictionaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some Ids have types like
forall a,b,c. Eq a -> Ord [a] -> tau
This seems curious at first, because we usually only have dictionary
args whose types are of the form (C a) where a is a type variable.
But this doesn't hold for the functions arising from instance decls,
which sometimes get arguments with types of form (C (T a)) for some
type constructor T.
Should we specialise wrt this compound-type dictionary? We used to say
"no", saying:
"This is a heuristic judgement, as indeed is the fact that we
specialise wrt only dictionaries. We choose *not* to specialise
wrt compound dictionaries because at the moment the only place
they show up is in instance decls, where they are simply plugged
into a returned dictionary. So nothing is gained by specialising
wrt them."
But it is simpler and more uniform to specialise wrt these dicts too;
and in future GHC is likely to support full fledged type signatures
like
f :: Eq [(a,b)] => ...
************************************************************************
* *
\subsubsection{The new specialiser}
* *
************************************************************************
Our basic game plan is this. For let(rec) bound function
f :: (C a, D c) => (a,b,c,d) -> Bool
* Find any specialised calls of f, (f ts ds), where
ts are the type arguments t1 .. t4, and
ds are the dictionary arguments d1 .. d2.
* Add a new definition for f1 (say):
f1 = /\ b d -> (..body of f..) t1 b t3 d d1 d2
Note that we abstract over the unconstrained type arguments.
* Add the mapping
[t1,b,t3,d] |-> \d1 d2 -> f1 b d
to the specialisations of f. This will be used by the
simplifier to replace calls
(f t1 t2 t3 t4) da db
by
(\d1 d2 -> f1 t2 t4) da db
All the stuff about how many dictionaries to discard, and what types
to apply the specialised function to, are handled by the fact that the
SpecEnv contains a template for the result of the specialisation.
We don't build *partial* specialisations for f. For example:
f :: Eq a => a -> a -> Bool
{-# SPECIALISE f :: (Eq b, Eq c) => (b,c) -> (b,c) -> Bool #-}
Here, little is gained by making a specialised copy of f.
There's a distinct danger that the specialised version would
first build a dictionary for (Eq b, Eq c), and then select the (==)
method from it! Even if it didn't, not a great deal is saved.
We do, however, generate polymorphic, but not overloaded, specialisations:
f :: Eq a => [a] -> b -> b -> b
... SPECIALISE f :: [Int] -> b -> b -> b ...
Hence, the invariant is this:
*** no specialised version is overloaded ***
************************************************************************
* *
\subsubsection{The exported function}
* *
************************************************************************
-}
-- | Specialise calls to type-class overloaded functions occurring in a program.
specProgram :: ModGuts -> CoreM ModGuts
specProgram guts@(ModGuts { mg_module = this_mod
                          , mg_rules  = local_rules
                          , mg_binds  = binds })
  = do { dflags <- getDynFlags

             -- Specialise the bindings of this module
       ; (binds', uds) <- runSpecM dflags this_mod (go binds)

             -- Specialise imported functions
       ; hpt_rules <- getRuleBase
       ; let rule_base = extendRuleBaseList hpt_rules local_rules
       ; (new_rules, spec_binds) <- specImports dflags this_mod top_env emptyVarSet
                                                [] rule_base uds

       ; let final_binds
               | null spec_binds = binds'
               | otherwise       = Rec (flattenBinds spec_binds) : binds'
                 -- Note [Glom the bindings if imported functions are specialised]

       ; return (guts { mg_binds = final_binds
                      , mg_rules = new_rules ++ local_rules }) }
  where
        -- We need to start with a Subst that knows all the things
        -- that are in scope, so that the substitution engine doesn't
        -- accidentally re-use a unique that's already in use
        -- Easiest thing is to do it all at once, as if all the top-level
        -- decls were mutually recursive
    top_env = SE { se_subst = CoreSubst.mkEmptySubst $ mkInScopeSet $ mkVarSet $
                              bindersOfBinds binds
                 , se_interesting = emptyVarSet }

    -- Walk the bindings back-to-front: the recursive call processes the
    -- later bindings first, so their usage details are available when we
    -- specialise the earlier ones via specBind
    go []           = return ([], emptyUDs)
    go (bind:binds) = do (binds', uds) <- go binds
                         (bind', uds') <- specBind top_env bind uds
                         return (bind' ++ binds', uds')
{-
Note [Wrap bindings returned by specImports]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'specImports' returns a set of specialized bindings. However, these are lacking
necessary floated dictionary bindings, which are returned by
UsageDetails(ud_binds). These dictionaries need to be brought into scope with
'wrapDictBinds' before the bindings returned by 'specImports' can be used. See,
for instance, the 'specImports' call in 'specProgram'.
Note [Disabling cross-module specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since GHC 7.10 we have performed specialisation of INLINABLE bindings living
in modules outside of the current module. This can sometimes uncover user code
which explodes in size when aggressively optimized. The
-fno-cross-module-specialise option was introduced to allow users being
bitten by such instances to revert to the pre-7.10 behavior.
See Trac #10491
-}
-- | Specialise a set of calls to imported bindings
specImports :: DynFlags
            -> Module
            -> SpecEnv          -- Passed in so that all top-level Ids are in scope
            -> VarSet           -- Don't specialise these ones
                                -- See Note [Avoiding recursive specialisation]
            -> [Id]             -- Stack of imported functions being specialised
            -> RuleBase         -- Rules from this module and the home package
                                --   (but not external packages, which can change)
            -> UsageDetails     -- Calls for imported things, and floating bindings
            -> CoreM ( [CoreRule]    -- New rules
                     , [CoreBind] )  -- Specialised bindings
                                     -- See Note [Wrap bindings returned by specImports]
specImports dflags this_mod top_env done callers rule_base
            (MkUD { ud_binds = dict_binds, ud_calls = calls })
  -- See Note [Disabling cross-module specialisation]
  | not $ gopt Opt_CrossModuleSpecialise dflags
  = return ([], [])

  | otherwise
  = do { let import_calls = dVarEnvElts calls
       ; (rules, spec_binds) <- go rule_base import_calls

             -- Don't forget to wrap the specialized bindings with
             -- bindings for the needed dictionaries.
             -- See Note [Wrap bindings returned by specImports]
       ; let spec_binds' = wrapDictBinds dict_binds spec_binds

       ; return (rules, spec_binds') }
  where
    -- Specialise one imported function at a time, threading the
    -- growing RuleBase through so later calls see earlier new rules
    go :: RuleBase -> [CallInfoSet] -> CoreM ([CoreRule], [CoreBind])
    go _ [] = return ([], [])
    go rb (cis@(CIS fn _) : other_calls)
      = do { let ok_calls = filterCalls cis dict_binds
                 -- Drop calls that (directly or indirectly) refer to fn
                 -- See Note [Avoiding loops]
--           ; debugTraceMsg (text "specImport" <+> vcat [ ppr fn
--                                                       , text "calls" <+> ppr cis
--                                                       , text "ud_binds =" <+> ppr dict_binds
--                                                       , text "dump set =" <+> ppr dump_set
--                                                       , text "filtered calls =" <+> ppr ok_calls ])
           ; (rules1, spec_binds1) <- specImport dflags this_mod top_env
                                                 done callers rb fn ok_calls
           ; (rules2, spec_binds2) <- go (extendRuleBaseList rb rules1) other_calls
           ; return (rules1 ++ rules2, spec_binds1 ++ spec_binds2) }
-- Specialise one imported function 'fn' for the given call patterns,
-- provided it has a usable unfolding; then recursively specialise any
-- new overloaded calls uncovered in the specialised RHSs
specImport :: DynFlags
           -> Module
           -> SpecEnv          -- Passed in so that all top-level Ids are in scope
           -> VarSet           -- Don't specialise these
                               -- See Note [Avoiding recursive specialisation]
           -> [Id]             -- Stack of imported functions being specialised
           -> RuleBase         -- Rules from this module
           -> Id -> [CallInfo] -- Imported function and calls for it
           -> CoreM ( [CoreRule]    -- New rules
                    , [CoreBind] )  -- Specialised bindings
specImport dflags this_mod top_env done callers rb fn calls_for_fn
  | fn `elemVarSet` done
  = return ([], [])     -- No warning.  This actually happens all the time
                        -- when specialising a recursive function, because
                        -- the RHS of the specialised function contains a recursive
                        -- call to the original function

  | null calls_for_fn   -- We filtered out all the calls in deleteCallsMentioning
  = return ([], [])

  | wantSpecImport dflags unfolding
  , Just rhs <- maybeUnfoldingTemplate unfolding
  = do {     -- Get rules from the external package state
             -- We keep doing this in case we "page-fault in"
             -- more rules as we go along
       ; hsc_env <- getHscEnv
       ; eps <- liftIO $ hscEPS hsc_env
       ; vis_orphs <- getVisibleOrphanMods
       ; let full_rb = unionRuleBase rb (eps_rule_base eps)
             rules_for_fn = getRules (RuleEnv full_rb vis_orphs) fn

       ; (rules1, spec_pairs, uds)
             <- -- pprTrace "specImport1" (vcat [ppr fn, ppr calls_for_fn, ppr rhs]) $
                runSpecM dflags this_mod $
                specCalls (Just this_mod) top_env rules_for_fn calls_for_fn fn rhs
       ; let spec_binds1 = [NonRec b r | (b,r) <- spec_pairs]
             -- After the rules kick in we may get recursion, but
             -- we rely on a global GlomBinds to sort that out later
             -- See Note [Glom the bindings if imported functions are specialised]

             -- Now specialise any cascaded calls
       ; (rules2, spec_binds2) <- -- pprTrace "specImport 2" (ppr fn $$ ppr rules1 $$ ppr spec_binds1) $
                                  specImports dflags this_mod top_env
                                              (extendVarSet done fn)
                                              (fn:callers)
                                              (extendRuleBaseList rb rules1)
                                              uds

       ; let final_binds = spec_binds2 ++ spec_binds1

       ; return (rules2 ++ rules1, final_binds) }

  | warnMissingSpecs dflags callers
  = do { warnMsg (vcat [ hang (text "Could not specialise imported function" <+> quotes (ppr fn))
                            2 (vcat [ text "when specialising" <+> quotes (ppr caller)
                                    | caller <- callers])
                       , whenPprDebug (text "calls:" <+> vcat (map (pprCallInfo fn) calls_for_fn))
                       , text "Probable fix: add INLINABLE pragma on" <+> quotes (ppr fn) ])
       ; return ([], []) }

  | otherwise
  = return ([], [])

  where
    unfolding = realIdUnfolding fn   -- We want to see the unfolding even for loop breakers
-- Should we emit a "could not specialise imported function" warning?
warnMissingSpecs :: DynFlags -> [Id] -> Bool
-- See Note [Warning about missed specialisations]
warnMissingSpecs dflags callers
  | wopt Opt_WarnAllMissedSpecs dflags   -- -Wall-missed-specialisations:
  = True                                 -- warn unconditionally

  | wopt Opt_WarnMissedSpecs dflags      -- -Wmissed-specialisations: warn only
  , not (null callers)                   -- when every caller in the stack
  = all caller_is_inline callers         -- carries an INLINE/INLINABLE pragma

  | otherwise
  = False
  where
    caller_is_inline c = isAnyInlinePragma (idInlinePragma c)
-- Decide whether an imported function's unfolding makes it a
-- candidate for specialisation
wantSpecImport :: DynFlags -> Unfolding -> Bool
-- See Note [Specialise imported INLINABLE things]
wantSpecImport dflags unf
  = case unf of
      DFunUnfolding {} -> True
      CoreUnfolding { uf_src = src, uf_guidance = _guidance }
        -> gopt Opt_SpecialiseAggressively dflags || isStableSource src
           -- Specialise even INLINE things; it hasn't inlined yet,
           -- so perhaps it never will.  Moreover it may have calls
           -- inside it that we want to specialise
           -- Otherwise (stable, not INLINE, hence INLINABLE) don't bother
      NoUnfolding      -> False
      BootUnfolding    -> False
      OtherCon {}      -> False
{- Note [Warning about missed specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose
* In module Lib, you carefully mark a function 'foo' INLINABLE
* Import Lib(foo) into another module M
* Call 'foo' at some specialised type in M
Then you jolly well expect it to be specialised in M. But what if
'foo' calls another function 'Lib.bar'. Then you'd like 'bar' to be
specialised too. But if 'bar' is not marked INLINABLE it may well
not be specialised. The warning Opt_WarnMissedSpecs warns about this.
It's noisier to warn about a missed specialisation opportunity
for /every/ overloaded imported function, but sometimes useful. That
is what Opt_WarnAllMissedSpecs does.
ToDo: warn about missed opportunities for local functions.
Note [Specialise imported INLINABLE things]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What imported functions do we specialise? The basic set is
* DFuns and things with INLINABLE pragmas.
but with -fspecialise-aggressively we add
* Anything with an unfolding template
Trac #8874 has a good example of why we want to auto-specialise DFuns.
We have the -fspecialise-aggressively flag (usually off), because we
risk lots of orphan modules from over-vigorous specialisation.
However it's not a big deal: anything non-recursive with an
unfolding-template will probably have been inlined already.
Note [Glom the bindings if imported functions are specialised]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have an imported, *recursive*, INLINABLE function
f :: Eq a => a -> a
f = /\a \d x. ...(f a d)...
In the module being compiled we have
g x = f (x::Int)
Now we'll make a specialised function
f_spec :: Int -> Int
f_spec = \x -> ...(f Int dInt)...
{-# RULE f Int _ = f_spec #-}
g = \x. f Int dInt x
Note that f_spec doesn't look recursive
After rewriting with the RULE, we get
f_spec = \x -> ...(f_spec)...
BUT since f_spec was non-recursive before it'll *stay* non-recursive.
The occurrence analyser never turns a NonRec into a Rec. So we must
make sure that f_spec is recursive. Easiest thing is to make all
the specialisations for imported bindings recursive.
Note [Avoiding recursive specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise 'f' we may find new overloaded calls to 'g', 'h' in
'f's RHS. So we want to specialise g,h. But we don't want to
specialise f any more! It's possible that f's RHS might have a
recursive yet-more-specialised call, so we'd diverge in that case.
And if the call is to the same type, one specialisation is enough.
Avoiding this recursive specialisation loop is the reason for the
'done' VarSet passed to specImports and specImport.
************************************************************************
* *
\subsubsection{@specExpr@: the main function}
* *
************************************************************************
-}
-- | The environment carried down through the specialiser
data SpecEnv
  = SE { se_subst :: CoreSubst.Subst
             -- We carry a substitution down:
             -- a) we must clone any binding that might float outwards,
             --    to avoid name clashes
             -- b) we carry a type substitution to use when analysing
             --    the RHS of specialised bindings (no type-let!)

       , se_interesting :: VarSet
             -- Dict Ids that we know something about
             -- and hence may be worth specialising against
             -- See Note [Interesting dictionary arguments]
       }
-- | Apply the environment's substitution to an occurrence of an Id
specVar :: SpecEnv -> Id -> CoreExpr
specVar (SE { se_subst = subst }) v
  = CoreSubst.lookupIdSubst (text "specVar") subst v
-- Specialise an expression: apply the substitution, record call
-- instances for overloaded applications, and dump usage details at
-- lambda and case binders
specExpr :: SpecEnv -> CoreExpr -> SpecM (CoreExpr, UsageDetails)

---------------- First the easy cases --------------------
specExpr env (Type ty)     = return (Type (substTy env ty), emptyUDs)
specExpr env (Coercion co) = return (Coercion (substCo env co), emptyUDs)
specExpr env (Var v)       = return (specVar env v, emptyUDs)
specExpr _   (Lit lit)     = return (Lit lit, emptyUDs)
specExpr env (Cast e co)
  = do { (e', uds) <- specExpr env e
       ; return ((mkCast e' (substCo env co)), uds) }
specExpr env (Tick tickish body)
  = do { (body', uds) <- specExpr env body
       ; return (Tick (specTickish env tickish) body', uds) }

---------------- Applications might generate a call instance --------------------
specExpr env expr@(App {})
  = go expr []
  where
    -- Walk down the application spine, specialising each argument;
    -- at a Var head, record a call instance via mkCallUDs
    go (App fun arg) args = do (arg', uds_arg) <- specExpr env arg
                               (fun', uds_app) <- go fun (arg':args)
                               return (App fun' arg', uds_arg `plusUDs` uds_app)
    go (Var f)       args = case specVar env f of
                              Var f' -> return (Var f', mkCallUDs env f' args)
                              e'     -> return (e', emptyUDs) -- I don't expect this!
    go other         _    = specExpr env other

---------------- Lambda/case require dumping of usage details --------------------
specExpr env e@(Lam _ _) = do
    (body', uds) <- specExpr env' body
    let (free_uds, dumped_dbs) = dumpUDs bndrs' uds
    return (mkLams bndrs' (wrapDictBindsE dumped_dbs body'), free_uds)
  where
    (bndrs, body)  = collectBinders e
    (env', bndrs') = substBndrs env bndrs
        -- More efficient to collect a group of binders together all at once
        -- and we don't want to split a lambda group with dumped bindings

specExpr env (Case scrut case_bndr ty alts)
  = do { (scrut', scrut_uds) <- specExpr env scrut
       ; (scrut'', case_bndr', alts', alts_uds)
             <- specCase env scrut' case_bndr alts
       ; return (Case scrut'' case_bndr' (substTy env ty) alts'
                , scrut_uds `plusUDs` alts_uds) }

---------------- Finally, let is the interesting case --------------------
specExpr env (Let bind body)
  = do { -- Clone binders
         (rhs_env, body_env, bind') <- cloneBindSM env bind

         -- Deal with the body
       ; (body', body_uds) <- specExpr body_env body

         -- Deal with the bindings
       ; (binds', uds) <- specBind rhs_env bind' body_uds

         -- All done
       ; return (foldr Let body' binds', uds) }
-- Apply the substitution to the variables captured by a tick
specTickish :: SpecEnv -> Tickish Id -> Tickish Id
specTickish env (Breakpoint ix ids)
  = Breakpoint ix (concatMap subst_var ids)
  where
    -- Keep a breakpoint variable only if its substitution is still a
    -- variable; drop it otherwise.  A non-variable substitution should
    -- never happen, but it's harmless to drop such entries anyway.
    subst_var v = case specVar env v of
                    Var v' -> [v']
                    _      -> []
specTickish _ other_tickish = other_tickish
-- Specialise the alternatives of a case expression.  The first equation
-- implements the dictionary-floating trick of
-- Note [Floating dictionaries out of cases]; the second is the ordinary
-- traversal of the alternatives.
specCase :: SpecEnv
         -> CoreExpr            -- Scrutinee, already done
         -> Id -> [CoreAlt]
         -> SpecM ( CoreExpr    -- New scrutinee
                  , Id
                  , [CoreAlt]
                  , UsageDetails)
specCase env scrut' case_bndr [(con, args, rhs)]
  | isDictId case_bndr           -- See Note [Floating dictionaries out of cases]
  , interestingDict env scrut'
  , not (isDeadBinder case_bndr && null sc_args')
  = do { (case_bndr_flt : sc_args_flt) <- mapM clone_me (case_bndr' : sc_args')

       ; let sc_rhss = [ Case (Var case_bndr_flt) case_bndr' (idType sc_arg')
                              [(con, args', Var sc_arg')]
                       | sc_arg' <- sc_args' ]

             -- Extend the substitution for RHS to map the *original* binders
             -- to their floated versions.
             mb_sc_flts :: [Maybe DictId]
             mb_sc_flts = map (lookupVarEnv clone_env) args'
             clone_env  = zipVarEnv sc_args' sc_args_flt
             subst_prs  = (case_bndr, Var case_bndr_flt)
                        : [ (arg, Var sc_flt)
                          | (arg, Just sc_flt) <- args `zip` mb_sc_flts ]
             env_rhs' = env_rhs { se_subst = CoreSubst.extendIdSubstList (se_subst env_rhs) subst_prs
                                , se_interesting = se_interesting env_rhs `extendVarSetList`
                                                   (case_bndr_flt : sc_args_flt) }

       ; (rhs', rhs_uds) <- specExpr env_rhs' rhs
       ; let scrut_bind    = mkDB (NonRec case_bndr_flt scrut')
             case_bndr_set = unitVarSet case_bndr_flt
             sc_binds      = [(NonRec sc_arg_flt sc_rhs, case_bndr_set)
                             | (sc_arg_flt, sc_rhs) <- sc_args_flt `zip` sc_rhss ]
             flt_binds     = scrut_bind : sc_binds
             (free_uds, dumped_dbs) = dumpUDs (case_bndr':args') rhs_uds
             all_uds = flt_binds `addDictBinds` free_uds
             alt'    = (con, args', wrapDictBindsE dumped_dbs rhs')
       ; return (Var case_bndr_flt, case_bndr', [alt'], all_uds) }
  where
    (env_rhs, (case_bndr':args')) = substBndrs env (case_bndr:args)
    sc_args' = filter is_flt_sc_arg args'

    -- Make a fresh Id with the same name/type/location as the original
    clone_me bndr = do { uniq <- getUniqueM
                       ; return (mkUserLocalOrCoVar occ uniq ty loc) }
      where
        name = idName bndr
        ty   = idType bndr
        occ  = nameOccName name
        loc  = getSrcSpan name

    arg_set = mkVarSet args'
    -- Which case-bound args are dictionaries worth floating?  Live dict
    -- Ids whose types do not mention any of the case-bound variables
    is_flt_sc_arg var =  isId var
                      && not (isDeadBinder var)
                      && isDictTy var_ty
                      && not (tyCoVarsOfType var_ty `intersectsVarSet` arg_set)
      where
        var_ty = idType var


specCase env scrut case_bndr alts
  = do { (alts', uds_alts) <- mapAndCombineSM spec_alt alts
       ; return (scrut, case_bndr', alts', uds_alts) }
  where
    (env_alt, case_bndr') = substBndr env case_bndr
    spec_alt (con, args, rhs) = do
          (rhs', uds) <- specExpr env_rhs rhs
          let (free_uds, dumped_dbs) = dumpUDs (case_bndr' : args') uds
          return ((con, args', wrapDictBindsE dumped_dbs rhs'), free_uds)
        where
          (env_rhs, args') = substBndrs env_alt args
{-
Note [Floating dictionaries out of cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
g = \d. case d of { MkD sc ... -> ...(f sc)... }
Naively we can't float d2's binding out of the case expression,
because 'sc' is bound by the case, and that in turn means we can't
specialise f, which seems a pity.
So we invert the case, by floating out a binding
for 'sc_flt' thus:
sc_flt = case d of { MkD sc ... -> sc }
Now we can float the call instance for 'f'. Indeed this is just
what'll happen if 'sc' was originally bound with a let binding,
but case is more efficient, and necessary with equalities. So it's
good to work with both.
You might think that this won't make any difference, because the
call instance will only get nuked by the \d. BUT if 'g' itself is
specialised, then transitively we should be able to specialise f.
In general, given
case e of cb { MkD sc ... -> ...(f sc)... }
we transform to
let cb_flt = e
sc_flt = case cb_flt of { MkD sc ... -> sc }
in
case cb_flt of bg { MkD sc ... -> ....(f sc_flt)... }
The "_flt" things are the floated binds; we use the current substitution
to substitute sc -> sc_flt in the RHS
************************************************************************
* *
Dealing with a binding
* *
************************************************************************
-}
specBind :: SpecEnv                     -- Use this for RHSs
         -> CoreBind                    -- Binders are already cloned by cloneBindSM,
                                        -- but RHSs are un-processed
         -> UsageDetails                -- Info on how the scope of the binding
         -> SpecM ([CoreBind],          -- New bindings
                   UsageDetails)        -- And info to pass upstream

-- Returned UsageDetails:
--    No calls for binders of this bind
specBind rhs_env (NonRec fn rhs) body_uds
  = do { (rhs', rhs_uds) <- specExpr rhs_env rhs
       ; (fn', spec_defns, body_uds1) <- specDefn rhs_env body_uds fn rhs

       ; let pairs = spec_defns ++ [(fn', rhs')]
                        -- fn' mentions the spec_defns in its rules,
                        -- so put the latter first

             combined_uds = body_uds1 `plusUDs` rhs_uds

             (free_uds, dump_dbs, float_all) = dumpBindUDs [fn] combined_uds

             final_binds :: [DictBind]
             -- See Note [From non-recursive to recursive]
             final_binds
               | not (isEmptyBag dump_dbs)
               , not (null spec_defns)
               = [recWithDumpedDicts pairs dump_dbs]
               | otherwise
               = [mkDB $ NonRec b r | (b,r) <- pairs]
                 ++ bagToList dump_dbs

       ; if float_all then
             -- Rather than discard the calls mentioning the bound variables
             -- we float this (dictionary) binding along with the others
             return ([], free_uds `snocDictBinds` final_binds)
         else
             -- No call in final_uds mentions bound variables,
             -- so we can just leave the binding here
             return (map fst final_binds, free_uds) }


specBind rhs_env (Rec pairs) body_uds
       -- Note [Specialising a recursive group]
  = do { let (bndrs,rhss) = unzip pairs
       ; (rhss', rhs_uds) <- mapAndCombineSM (specExpr rhs_env) rhss
       ; let scope_uds = body_uds `plusUDs` rhs_uds
                       -- Includes binds and calls arising from rhss

       ; (bndrs1, spec_defns1, uds1) <- specDefns rhs_env scope_uds pairs

       ; (bndrs3, spec_defns3, uds3)
             <- if null spec_defns1  -- Common case: no specialisation
                then return (bndrs1, [], uds1)
                else do {            -- Specialisation occurred; do it again
                          (bndrs2, spec_defns2, uds2)
                              <- specDefns rhs_env uds1 (bndrs1 `zip` rhss)
                        ; return (bndrs2, spec_defns2 ++ spec_defns1, uds2) }

       ; let (final_uds, dumped_dbs, float_all) = dumpBindUDs bndrs uds3
             final_bind = recWithDumpedDicts (spec_defns3 ++ zip bndrs3 rhss')
                                             dumped_dbs

       ; if float_all then
             return ([], final_uds `snocDictBind` final_bind)
         else
             return ([fst final_bind], final_uds) }
---------------------------
specDefns :: SpecEnv
          -> UsageDetails               -- Info on how it is used in its scope
          -> [(OutId,InExpr)]           -- The things being bound and their un-processed RHS
          -> SpecM ([OutId],            -- Original Ids with RULES added
                    [(OutId,OutExpr)],  -- Extra, specialised bindings
                    UsageDetails)       -- Stuff to fling upwards from the specialised versions

-- Specialise a list of bindings (the contents of a Rec), but flowing usages
-- upwards binding by binding.  Example: { f = ...g ...; g = ...f .... }
-- Then if the input CallDetails has a specialised call for 'g', whose specialisation
-- in turn generates a specialised call for 'f', we catch that in this one sweep.
-- But not vice versa (it's a fixpoint problem).

specDefns _ uds [] = return ([], [], uds)
specDefns env uds ((fn, rhs) : rest)
  = do { -- Process the later bindings first, so the usage details they
         -- generate are in hand when we specialise 'fn'
         (fns', defns_rest, uds_rest) <- specDefns env uds rest
       ; (fn', defns_fn, uds') <- specDefn env uds_rest fn rhs
       ; return (fn' : fns', defns_rest ++ defns_fn, uds') }
---------------------------
specDefn :: SpecEnv
         -> UsageDetails               -- Info on how it is used in its scope
         -> OutId -> InExpr            -- The thing being bound and its un-processed RHS
         -> SpecM (Id,                 -- Original Id with added RULES
                   [(Id,CoreExpr)],    -- Extra, specialised bindings
                   UsageDetails)       -- Stuff to fling upwards from the specialised versions

specDefn env body_uds fn rhs
  = do { (new_rules, spec_defns, spec_uds)
             <- specCalls Nothing env rules_for_me calls_for_me fn rhs
       ; return ( addIdSpecialisations fn new_rules
                , spec_defns
                , body_uds_without_me `plusUDs` spec_uds ) }
                -- It's important that the `plusUDs` is this way
                -- round, because body_uds_without_me may bind
                -- dictionaries that are used in calls_for_me passed
                -- to specDefn.  So the dictionary bindings in
                -- spec_uds may mention dictionaries bound in
                -- body_uds_without_me
  where
    -- Split off the calls for 'fn' itself; they are what we specialise on
    (body_uds_without_me, calls_for_me) = callsForMe fn body_uds
    rules_for_me = idCoreRules fn
---------------------------
specCalls :: Maybe Module      -- Just this_mod  =>  specialising imported fn
                               -- Nothing        =>  specialising local fn
          -> SpecEnv
          -> [CoreRule]        -- Existing RULES for the fn
          -> [CallInfo]
          -> OutId -> InExpr
          -> SpecM SpecInfo    -- New rules, specialised bindings, and usage details

-- This function checks existing rules, and does not create
-- duplicate ones. So the caller does not need to do this filtering.
-- See 'already_covered'

-- | The result of specialising a function: the specialisation rules,
-- the specialised definitions themselves, and the usage details
-- arising from their right-hand sides
type SpecInfo = ( [CoreRule]       -- Specialisation rules
                , [(Id,CoreExpr)]  -- Specialised definition
                , UsageDetails )   -- Usage details from specialised RHSs
-- Specialise 'fn' (with RHS 'rhs') once for each call pattern in
-- 'calls_for_me', folding over the calls and skipping any pattern
-- already covered by an existing or freshly-generated rule
specCalls mb_mod env existing_rules calls_for_me fn rhs
        -- The first case is the interesting one
  |  rhs_tyvars `lengthIs`      n_tyvars -- Rhs of fn's defn has right number of big lambdas
  && rhs_bndrs1 `lengthAtLeast` n_dicts  -- and enough dict args
  && notNull calls_for_me                -- And there are some calls to specialise
  && not (isNeverActive (idInlineActivation fn))
        -- Don't specialise NOINLINE things
        -- See Note [Auto-specialisation and RULES]

--   && not (certainlyWillInline (idUnfolding fn))      -- And it's not small
--      See Note [Inline specialisation] for why we do not
--      switch off specialisation for inline functions

  = -- pprTrace "specDefn: some" (ppr fn $$ ppr calls_for_me $$ ppr existing_rules) $
    foldlM spec_call ([], [], emptyUDs) calls_for_me

  | otherwise   -- No calls or RHS doesn't fit our preconceptions
  = WARN( not (exprIsTrivial rhs) && notNull calls_for_me,
          text "Missed specialisation opportunity for"
              <+> ppr fn $$ _trace_doc )
          -- Note [Specialisation shape]
    -- pprTrace "specDefn: none" (ppr fn <+> ppr calls_for_me) $
    return ([], [], emptyUDs)
  where
    _trace_doc = sep [ ppr rhs_tyvars, ppr n_tyvars
                     , ppr rhs_bndrs, ppr n_dicts
                     , ppr (idInlineActivation fn) ]

    fn_type  = idType fn
    fn_arity = idArity fn
    fn_unf   = realIdUnfolding fn  -- Ignore loop-breaker-ness here
    (tyvars, theta, _) = tcSplitSigmaTy fn_type
    n_tyvars = length tyvars
    n_dicts  = length theta
    inl_prag = idInlinePragma fn
    inl_act  = inlinePragmaActivation inl_prag
    is_local = isLocalId fn

        -- Figure out whether the function has an INLINE pragma
        -- See Note [Inline specialisations]

    (rhs_bndrs, rhs_body) = collectBindersPushingCo rhs
                            -- See Note [Account for casts in binding]
    (rhs_tyvars, rhs_bndrs1)   = span isTyVar rhs_bndrs
    (rhs_dict_ids, rhs_bndrs2) = splitAt n_dicts rhs_bndrs1
    body = mkLams rhs_bndrs2 rhs_body
                -- Glue back on the non-dict lambdas

    in_scope = CoreSubst.substInScope (se_subst env)

    -- Is this call pattern already handled by a rule (new or existing)?
    already_covered :: DynFlags -> [CoreRule] -> [CoreExpr] -> Bool
    already_covered dflags new_rules args      -- Note [Specialisations already covered]
       = isJust (lookupRule dflags (in_scope, realIdUnfolding)
                            (const True) fn args
                            (new_rules ++ existing_rules))
         -- NB: we look both in the new_rules (generated by this invocation
         --     of specCalls), and in existing_rules (passed in to specCalls)

    -- Build the type arguments for the RULE's LHS: the called-at type
    -- where the call fixed it, otherwise the fresh polymorphic tyvar
    mk_ty_args :: [Maybe Type] -> [TyVar] -> [CoreExpr]
    mk_ty_args [] poly_tvs
      = ASSERT( null poly_tvs ) []
    mk_ty_args (Nothing : call_ts) (poly_tv : poly_tvs)
      = Type (mkTyVarTy poly_tv) : mk_ty_args call_ts poly_tvs
    mk_ty_args (Just ty : call_ts) poly_tvs
      = Type ty : mk_ty_args call_ts poly_tvs
    mk_ty_args (Nothing : _) [] = panic "mk_ty_args"

    ----------------------------------------------------------
        -- Specialise to one particular call pattern
    spec_call :: SpecInfo                         -- Accumulating parameter
              -> CallInfo                         -- Call instance
              -> SpecM SpecInfo
    spec_call spec_acc@(rules_acc, pairs_acc, uds_acc)
              (CI { ci_key = CallKey call_ts, ci_args = call_ds })
      = ASSERT( call_ts `lengthIs` n_tyvars && call_ds `lengthIs` n_dicts )

        -- Suppose f's defn is  f = /\ a b c -> \ d1 d2 -> rhs
        -- Suppose the call is for f [Just t1, Nothing, Just t3] [dx1, dx2]

        -- Construct the new binding
        --      f1 = SUBST[a->t1,c->t3, d1->d1', d2->d2'] (/\ b -> rhs)
        -- PLUS the rule
        --      RULE "SPEC f" forall b d1' d2'. f b d1' d2' = f1 b
        -- In the rule, d1' and d2' are just wildcards, not used in the RHS
        -- PLUS the usage-details
        --      { d1' = dx1; d2' = dx2 }
        -- where d1', d2' are cloned versions of d1,d2, with the type substitution
        -- applied.  These auxiliary bindings just avoid duplication of dx1, dx2
        --
        -- Note that the substitution is applied to the whole thing.
        -- This is convenient, but just slightly fragile.  Notably:
        --      * There had better be no name clashes in a/b/c

        do { let
                -- poly_tyvars = [b] in the example above
                -- spec_tyvars = [a,c]
                -- ty_args     = [t1,b,t3]
                spec_tv_binds = [(tv,ty) | (tv, Just ty) <- rhs_tyvars `zip` call_ts]
                env1          = extendTvSubstList env spec_tv_binds
                (rhs_env, poly_tyvars) = substBndrs env1
                                            [tv | (tv, Nothing) <- rhs_tyvars `zip` call_ts]

                -- Clone rhs_dicts, including instantiating their types
           ; inst_dict_ids <- mapM (newDictBndr rhs_env) rhs_dict_ids
           ; let (rhs_env2, dx_binds, spec_dict_args)
                             = bindAuxiliaryDicts rhs_env rhs_dict_ids call_ds inst_dict_ids
                 ty_args    = mk_ty_args call_ts poly_tyvars
                 ev_args    = map varToCoreExpr inst_dict_ids  -- ev_args, ev_bndrs:
                 ev_bndrs   = exprsFreeIdsList ev_args         -- See Note [Evidence foralls]
                 rule_args  = ty_args     ++ ev_args
                 rule_bndrs = poly_tyvars ++ ev_bndrs

           ; dflags <- getDynFlags
           ; if already_covered dflags rules_acc rule_args
             then return spec_acc
             else -- pprTrace "spec_call" (vcat [ ppr _call_info, ppr fn, ppr rhs_dict_ids
                  --                            , text "rhs_env2" <+> ppr (se_subst rhs_env2)
                  --                            , ppr dx_binds ]) $
                  do
           {    -- Figure out the type of the specialised function
             let body_ty = applyTypeToArgs rhs fn_type rule_args

                 (lam_args, app_args)           -- Add a dummy argument if body_ty is unlifted
                   | isUnliftedType body_ty     -- C.f. WwLib.mkWorkerArgs
                   , not (isJoinId fn)
                   = (poly_tyvars ++ [voidArgId], poly_tyvars ++ [voidPrimId])
                   | otherwise = (poly_tyvars, poly_tyvars)

                 spec_id_ty = mkLamTypes lam_args body_ty
                 join_arity_change = length app_args - length rule_args
                 spec_join_arity | Just orig_join_arity <- isJoinId_maybe fn
                                 = Just (orig_join_arity + join_arity_change)
                                 | otherwise
                                 = Nothing

           ; spec_f <- newSpecIdSM fn spec_id_ty spec_join_arity
           ; (spec_rhs, rhs_uds) <- specExpr rhs_env2 (mkLams lam_args body)
           ; this_mod <- getModule
           ; let
                -- The rule to put in the function's specialisation is:
                --      forall b, d1',d2'.  f t1 b t3 d1' d2' = f1 b
                herald = case mb_mod of
                           Nothing        -- Specialising local fn
                               -> text "SPEC"
                           Just this_mod  -- Specialising imported fn
                               -> text "SPEC/" <> ppr this_mod

                rule_name = mkFastString $ showSDoc dflags $
                            herald <+> ftext (occNameFS (getOccName fn))
                                   <+> hsep (map ppr_call_key_ty call_ts)
                    -- This name ends up in interface files, so use occNameString.
                    -- Otherwise uniques end up there, making builds
                    -- less deterministic (See #4012 comment:61 ff)

                rule_wout_eta = mkRule
                                  this_mod
                                  True {- Auto generated -}
                                  is_local
                                  rule_name
                                  inl_act       -- Note [Auto-specialisation and RULES]
                                  (idName fn)
                                  rule_bndrs
                                  rule_args
                                  (mkVarApps (Var spec_f) app_args)

                spec_rule
                  = case isJoinId_maybe fn of
                      Just join_arity -> etaExpandToJoinPointRule join_arity
                                                                  rule_wout_eta
                      Nothing -> rule_wout_eta

                -- Add the { d1' = dx1; d2' = dx2 } usage stuff
                spec_uds = foldr consDictBind rhs_uds dx_binds

                --------------------------------------
                -- Add a suitable unfolding if the spec_inl_prag says so
                -- See Note [Inline specialisations]
                (spec_inl_prag, spec_unf)
                  | not is_local && isStrongLoopBreaker (idOccInfo fn)
                  = (neverInlinePragma, noUnfolding)
                        -- See Note [Specialising imported functions] in OccurAnal

                  | InlinePragma { inl_inline = Inlinable } <- inl_prag
                  = (inl_prag { inl_inline = NoUserInline }, noUnfolding)

                  | otherwise
                  = (inl_prag, specUnfolding poly_tyvars spec_app
                                             arity_decrease fn_unf)

                arity_decrease = length spec_dict_args
                spec_app e = (e `mkApps` ty_args) `mkApps` spec_dict_args

                --------------------------------------
                -- Adding arity information just propagates it a bit faster
                --      See Note [Arity decrease] in Simplify
                -- Copy InlinePragma information from the parent Id.
                -- So if f has INLINE[1] so does spec_f
                spec_f_w_arity = spec_f `setIdArity`      max 0 (fn_arity - n_dicts)
                                        `setInlinePragma` spec_inl_prag
                                        `setIdUnfolding`  spec_unf
                                        `asJoinId_maybe`  spec_join_arity

           ; return ( spec_rule                  : rules_acc
                    , (spec_f_w_arity, spec_rhs) : pairs_acc
                    , spec_uds `plusUDs` uds_acc
                    ) } }
{- Note [Account for casts in binding]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
f :: Eq a => a -> IO ()
{-# INLINABLE f
StableUnf = (/\a \(d:Eq a) (x:a). blah) |> g
#-}
f = ...
In f's stable unfolding we have done some modest simplification which
has pushed the cast to the outside. (I wonder if this is the Right
Thing, but it's what happens now; see SimplUtils Note [Casts and
lambdas].) Now that stable unfolding must be specialised, so we want
to push the cast back inside. It would be terrible if the cast
defeated specialisation! Hence the use of collectBindersPushingCo.
Note [Evidence foralls]
~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose (Trac #12212) that we are specialising
f :: forall a b. (Num a, F a ~ F b) => blah
with a=b=Int. Then the RULE will be something like
RULE forall (d:Num Int) (g :: F Int ~ F Int).
f Int Int d g = f_spec
But both varToCoreExpr (when constructing the LHS args), and the
simplifier (when simplifying the LHS args), will transform to
RULE forall (d:Num Int) (g :: F Int ~ F Int).
f Int Int d <F Int> = f_spec
by replacing g with Refl. So now 'g' is unbound, which results in a later
crash. So we use Refl right off the bat, and do not forall-quantify 'g':
* varToCoreExpr generates a Refl
* exprsFreeIdsList returns the Ids bound by the args,
which won't include g
You might wonder if this will match as often, but the simplifier replaces
complicated Refl coercions with Refl pretty aggressively.
Note [Orphans and auto-generated rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise an INLINABLE function, or when we have
-fspecialise-aggressively, we auto-generate RULES that are orphans.
We don't want to warn about these, or we'd generate a lot of warnings.
Thus, we only warn about user-specified orphan rules.
Indeed, we don't even treat the module as an orphan module if it has
auto-generated *rule* orphans. Orphan modules are read every time we
compile, so they are pretty obtrusive and slow down every compilation,
even non-optimised ones. (Reason: for type class instances it's a
type correctness issue.) But specialisation rules are strictly for
*optimisation* only so it's fine not to read the interface.
What this means is that a SPEC rule from auto-specialisation in
module M will be used in other modules only if M.hi has been read for
some other reason, which is actually pretty likely.
-}
-- | Bind each dictionary argument of a call to a fresh name, to preserve
-- sharing when the specialised copy is built.  Returns the extended
-- environment, the auxiliary bindings, and one trivial witnessing
-- expression per dictionary argument.
bindAuxiliaryDicts
        :: SpecEnv
        -> [DictId] -> [CoreExpr]   -- Original dict bndrs, and the witnessing expressions
        -> [DictId]                 -- A cloned dict-id for each dict arg
        -> (SpecEnv,                -- Substitute for all orig_dicts
            [DictBind],             -- Auxiliary dict bindings
            [CoreExpr])             -- Witnessing expressions (all trivial)
-- Bind any dictionary arguments to fresh names, to preserve sharing
bindAuxiliaryDicts env@(SE { se_subst = subst, se_interesting = interesting })
                   orig_dict_ids call_ds inst_dict_ids
  = (env', dx_binds, spec_dict_args)
  where
    (dx_binds, spec_dict_args) = go call_ds inst_dict_ids

    -- Substitute the *original* dict binders, and bring the freshly
    -- bound dict ids into scope
    env' = env { se_subst = subst `CoreSubst.extendSubstList`
                                     (orig_dict_ids `zip` spec_dict_args)
                                  `CoreSubst.extendInScopeList` dx_ids
               , se_interesting = interesting `unionVarSet` interesting_dicts }

    dx_ids = [dx_id | (NonRec dx_id _, _) <- dx_binds]

    interesting_dicts = mkVarSet [ dx_id | (NonRec dx_id dx, _) <- dx_binds
                                 , interestingDict env dx ]
                  -- See Note [Make the new dictionaries interesting]

    go :: [CoreExpr] -> [CoreBndr] -> ([DictBind], [CoreExpr])
    go [] _  = ([], [])
    go (dx:dxs) (dx_id:dx_ids)
        | exprIsTrivial dx = (dx_binds, dx : args)
        | otherwise        = (mkDB (NonRec dx_id dx) : dx_binds, Var dx_id : args)
        where
          (dx_binds, args) = go dxs dx_ids
             -- In the first case extend the substitution but not bindings;
             -- in the latter extend the bindings but not the substitution.
             -- For the former, note that we bind the *original* dict in the substitution,
             -- overriding any d->dx_id binding put there by substBndrs

    go _ _ = pprPanic "bindAuxiliaryDicts" (ppr orig_dict_ids $$ ppr call_ds $$ ppr inst_dict_ids)
{-
Note [Make the new dictionaries interesting]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Important! We're going to substitute dx_id1 for d
and we want it to look "interesting", else we won't gather *any*
consequential calls. E.g.
f d = ...g d....
If we specialise f for a call (f (dfun dNumInt)), we'll get
a consequent call (g d') with an auxiliary definition
d' = df dNumInt
We want that consequent call to look interesting
Note [From non-recursive to recursive]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Even in the non-recursive case, if any dict-binds depend on 'fn' we might
have built a recursive knot
f a d x = <blah>
MkUD { ud_binds = NonRec d7 (MkD ..f..)
, ud_calls = ...(f T d7)... }
Then we generate
Rec { fs x = <blah>[T/a, d7/d]
f a d x = <blah>
RULE f T _ = fs
d7 = ...f... }
Here the recursion is only through the RULE.
However we definitely should /not/ make the Rec in this wildly common
case:
d = ...
MkUD { ud_binds = NonRec d7 (...d...)
, ud_calls = ...(f T d7)... }
Here we want simply to add d to the floats, giving
MkUD { ud_binds = NonRec d (...)
NonRec d7 (...d...)
, ud_calls = ...(f T d7)... }
In general, we need only make this Rec if
- there are some specialisations (spec_binds non-empty)
- there are some dict_binds that depend on f (dump_dbs non-empty)
Note [Avoiding loops]
~~~~~~~~~~~~~~~~~~~~~
When specialising /dictionary functions/ we must be very careful to
avoid building loops. Here is an example that bit us badly: Trac #3591
class Eq a => C a
instance Eq [a] => C [a]
This translates to
dfun :: Eq [a] -> C [a]
dfun a d = MkD a d (meth d)
d4 :: Eq [T] = <blah>
d2 :: C [T] = dfun T d4
d1 :: Eq [T] = $p1 d2
d3 :: C [T] = dfun T d1
None of these definitions is recursive. What happened was that we
generated a specialisation:
RULE forall d. dfun T d = dT :: C [T]
dT = (MkD a d (meth d)) [T/a, d1/d]
= MkD T d1 (meth d1)
But now we use the RULE on the RHS of d2, to get
d2 = dT = MkD d1 (meth d1)
d1 = $p1 d2
and now d1 is bottom! The problem is that when specialising 'dfun' we
should first dump "below" the binding all floated dictionary bindings
that mention 'dfun' itself. So d2 and d3 (and hence d1) must be
placed below 'dfun', and thus unavailable to it when specialising
'dfun'. That in turn means that the call (dfun T d1) must be
discarded. On the other hand, the call (dfun T d4) is fine, assuming
d4 doesn't mention dfun.
Solution:
Discard all calls that mention dictionaries that depend
(directly or indirectly) on the dfun we are specialising.
This is done by 'filterCalls'
--------------
Here's another example, this time for an imported dfun, so the call
to filterCalls is in specImports (Trac #13429). Suppose we have
class Monoid v => C v a where ...
We start with a call
f @ [Integer] @ Integer $fC[]Integer
Specialising call to 'f' gives dict bindings
$dMonoid_1 :: Monoid [Integer]
$dMonoid_1 = M.$p1C @ [Integer] $fC[]Integer
$dC_1 :: C [Integer] (Node [Integer] Integer)
$dC_1 = M.$fCvNode @ [Integer] $dMonoid_1
...plus a recursive call to
f @ [Integer] @ (Node [Integer] Integer) $dC_1
Specialising that call gives
$dMonoid_2 :: Monoid [Integer]
$dMonoid_2 = M.$p1C @ [Integer] $dC_1
$dC_2 :: C [Integer] (Node [Integer] Integer)
$dC_2 = M.$fCvNode @ [Integer] $dMonoid_2
Now we have two calls to the imported function
M.$fCvNode :: Monoid v => C v a
M.$fCvNode @v @a m = C m some_fun
But we must /not/ use the call (M.$fCvNode @ [Integer] $dMonoid_2)
for specialisation, else we get:
$dC_1 = M.$fCvNode @ [Integer] $dMonoid_1
$dMonoid_2 = M.$p1C @ [Integer] $dC_1
$s$fCvNode = C $dMonoid_2 ...
RULE M.$fCvNode [Integer] _ _ = $s$fCvNode
Now use the rule to rewrite the call in the RHS of $dC_1
and we get a loop!
--------------
Here's yet another example
class C a where { foo,bar :: [a] -> [a] }
instance C Int where
foo x = r_bar x
bar xs = reverse xs
r_bar :: C a => [a] -> [a]
r_bar xs = bar (xs ++ xs)
That translates to:
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs }
The call (r_bar $fCInt) mentions $fCInt,
which mentions foo_help,
which mentions r_bar
But we DO want to specialise r_bar at Int:
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
RULE r_bar Int _ = r_bar_Int
r_bar_Int xs = bar Int $fCInt (xs ++ xs)
}
Note that, because of its RULE, r_bar joins the recursive
group. (In this case it'll unravel a short moment later.)
Note [Specialising a recursive group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
let rec { f x = ...g x'...
; g y = ...f y'.... }
in f 'a'
Here we specialise 'f' at Char; but that is very likely to lead to
a specialisation of 'g' at Char. We must do the latter, else the
whole point of specialisation is lost.
But we do not want to keep iterating to a fixpoint, because in the
presence of polymorphic recursion we might generate an infinite number
of specialisations.
So we use the following heuristic:
* Arrange the rec block in dependency order, so far as possible
(the occurrence analyser already does this)
* Specialise it much like a sequence of lets
* Then go through the block a second time, feeding call-info from
the RHSs back in the bottom, as it were
In effect, the ordering maximises the effectiveness of each sweep,
and we do just two sweeps. This should catch almost every case of
monomorphic recursion -- the exception could be a very knotted-up
recursion with multiple cycles tied up together.
This plan is implemented in the Rec case of specBindItself.
Note [Specialisations already covered]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We obviously don't want to generate two specialisations for the same
argument pattern. There are two wrinkles
1. We do the already-covered test in specDefn, not when we generate
the CallInfo in mkCallUDs. We used to test in the latter place, but
we now iterate the specialiser somewhat, and the Id at the call site
might therefore not have all the RULES that we can see in specDefn
2. What about two specialisations where the second is an *instance*
of the first? If the more specific one shows up first, we'll generate
specialisations for both. If the *less* specific one shows up first,
we *don't* currently generate a specialisation for the more specific
one. (See the call to lookupRule in already_covered.) Reasons:
(a) lookupRule doesn't say which matches are exact (bad reason)
(b) if the earlier specialisation is user-provided, it's
far from clear that we should auto-specialise further
Note [Auto-specialisation and RULES]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider:
g :: Num a => a -> a
g = ...
f :: (Int -> Int) -> Int
f w = ...
{-# RULE f g = 0 #-}
Suppose that auto-specialisation makes a specialised version of
g::Int->Int That version won't appear in the LHS of the RULE for f.
So if the specialisation rule fires too early, the rule for f may
never fire.
It might be possible to add new rules, to "complete" the rewrite system.
Thus when adding
RULE forall d. g Int d = g_spec
also add
RULE f g_spec = 0
But that's a bit complicated. For now we ask the programmer's help,
by *copying the INLINE activation pragma* to the auto-specialised
rule. So if g says {-# NOINLINE[2] g #-}, then the auto-spec rule
will also not be active until phase 2. And that's what programmers
should jolly well do anyway, even aside from specialisation, to ensure
that g doesn't inline too early.
This in turn means that the RULE would never fire for a NOINLINE
thing so not much point in generating a specialisation at all.
Note [Specialisation shape]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We only specialise a function if it has visible top-level lambdas
corresponding to its overloading. E.g. if
f :: forall a. Eq a => ....
then its body must look like
f = /\a. \d. ...
Reason: when specialising the body for a call (f ty dexp), we want to
substitute dexp for d, and pick up specialised calls in the body of f.
This doesn't always work. One example I came across was this:
newtype Gen a = MkGen{ unGen :: Int -> a }
choose :: Eq a => a -> Gen a
choose n = MkGen (\r -> n)
oneof = choose (1::Int)
It's a silly example, but we get
choose = /\a. g `cast` co
where choose doesn't have any dict arguments. Thus far I have not
tried to fix this (wait till there's a real example).
Mind you, then 'choose' will be inlined (since RHS is trivial) so
it doesn't matter. This comes up with single-method classes
class C a where { op :: a -> a }
instance C a => C [a] where ....
==>
$fCList :: C a => C [a]
$fCList = $copList |> (...coercion>...)
....(uses of $fCList at particular types)...
So we suppress the WARN if the rhs is trivial.
Note [Inline specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is what we do with the InlinePragma of the original function
* Activation/RuleMatchInfo: both transferred to the
specialised function
* InlineSpec:
(a) An INLINE pragma is transferred
(b) An INLINABLE pragma is *not* transferred
Why (a): transfer INLINE pragmas? The point of INLINE was precisely to
specialise the function at its call site, and arguably that's not so
important for the specialised copies. BUT *pragma-directed*
specialisation now takes place in the typechecker/desugarer, with
manually specified INLINEs. The specialisation here is automatic.
It'd be very odd if a function marked INLINE was specialised (because
of some local use), and then forever after (including importing
modules) the specialised version wasn't INLINEd. After all, the
programmer said INLINE!
You might wonder why we specialise INLINE functions at all. After
all they should be inlined, right? Two reasons:
* Even INLINE functions are sometimes not inlined, when they aren't
applied to interesting arguments. But perhaps the type arguments
alone are enough to specialise (even though the args are too boring
to trigger inlining), and it's certainly better to call the
specialised version.
* The RHS of an INLINE function might call another overloaded function,
and we'd like to generate a specialised version of that function too.
This actually happens a lot. Consider
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINABLE replicateM_ #-}
replicateM_ d x ma = ...
The strictness analyser may transform to
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINE replicateM_ #-}
replicateM_ d x ma = case x of I# x' -> $wreplicateM_ d x' ma
$wreplicateM_ :: (Monad m) => Int# -> m a -> m ()
{-# INLINABLE $wreplicateM_ #-}
$wreplicateM_ = ...
Now an importing module has a specialised call to replicateM_, say
(replicateM_ dMonadIO). We certainly want to specialise $wreplicateM_!
This particular example had a huge effect on the call to replicateM_
in nofib/shootout/n-body.
Why (b): discard INLINABLE pragmas? See Trac #4874 for persuasive examples.
Suppose we have
{-# INLINABLE f #-}
f :: Ord a => [a] -> Int
f xs = letrec f' = ...f'... in f'
Then, when f is specialised and optimised we might get
wgo :: [Int] -> Int#
wgo = ...wgo...
f_spec :: [Int] -> Int
f_spec xs = case wgo xs of { r -> I# r }
and we clearly want to inline f_spec at call sites. But if we still
have the big, un-optimised of f (albeit specialised) captured in an
INLINABLE pragma for f_spec, we won't get that optimisation.
So we simply drop INLINABLE pragmas when specialising. It's not really
a complete solution; ignoring specialisation for now, INLINABLE functions
don't get properly strictness analysed, for example. But it works well
for examples involving specialisation, which is the dominant use of
INLINABLE. See Trac #4874.
************************************************************************
* *
\subsubsection{UsageDetails and suchlike}
* *
************************************************************************
-}
-- | Accumulated on the way up the tree: the dictionary bindings we have
-- floated, and the overloaded calls we have seen (candidates for
-- specialisation).
data UsageDetails
  = MkUD {
        ud_binds :: !(Bag DictBind),
               -- See Note [Floated dictionary bindings]
               -- The order is important;
               -- in ds1 `union` ds2, bindings in ds2 can depend on those in ds1
               -- (Remember, Bags preserve order in GHC.)

        ud_calls :: !CallDetails
             -- INVARIANT: suppose bs = bindersOf ud_binds
             -- Then 'calls' may *mention* 'bs',
             -- but there should be no calls *for* bs
    }

-- | A 'DictBind' is a binding along with a cached set containing its free
-- variables (both type variables and dictionaries)
type DictBind = (CoreBind, VarSet)
{- Note [Floated dictionary bindings]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We float out dictionary bindings for the reasons described under
"Dictionary floating" above. But not /just/ dictionary bindings.
Consider
f :: Eq a => blah
f a d = rhs
$c== :: T -> T -> Bool
$c== x y = ...
$df :: Eq T
$df = Eq $c== ...
gurgle = ...(f @T $df)...
We gather the call info for (f @T $df), and we don't want to drop it
when we come across the binding for $df. So we add $df to the floats
and continue. But then we have to add $c== to the floats, and so on.
These all float above the binding for 'f', and now we can
successfully specialise 'f'.
So the DictBinds in (ud_binds :: Bag DictBind) may contain
non-dictionary bindings too.
-}
-- Debug rendering of a UsageDetails: the floated bindings plus the
-- recorded calls, comma-separated inside braces.
instance Outputable UsageDetails where
  ppr (MkUD { ud_binds = dbs, ud_calls = calls })
        = text "MkUD" <+> braces (sep (punctuate comma fields))
        where
          fields = [ text "binds" <+> equals <+> ppr dbs
                   , text "calls" <+> equals <+> ppr calls ]
-- | A 'UsageDetails' with no floated bindings and no recorded calls.
emptyUDs :: UsageDetails
emptyUDs = MkUD { ud_calls = emptyDVarEnv, ud_binds = emptyBag }
------------------------------------------------------------
-- | Maps each function Id to the set of calls we have seen for it.
type CallDetails  = DIdEnv CallInfoSet
-- The order of specialized binds and rules depends on how we linearize
-- CallDetails, so to get determinism we must use a deterministic set here.
-- See Note [Deterministic UniqFM] in UniqDFM

-- | All the calls recorded for one particular function Id.
data CallInfoSet = CIS Id (Bag CallInfo)
  -- The list of types and dictionaries is guaranteed to
  -- match the type of f
  -- The Bag may contain duplicate calls (i.e. f @T and another f @T)
  -- These dups are eliminated by already_covered in specCalls

-- | One call: its type arguments (the specialisation key), its
-- dictionary arguments, and the free variables of both.
data CallInfo
  = CI { ci_key  :: CallKey     -- Type arguments
       , ci_args :: [DictExpr]  -- Dictionary arguments
       , ci_fvs  :: VarSet      -- Free vars of the ci_key and ci_args
                                -- call (including tyvars)
                                -- [*not* include the main id itself, of course]
    }

-- | The type arguments of one call, used as the key that identifies a
-- candidate specialisation.
newtype CallKey   = CallKey [Maybe Type]
  -- Nothing => unconstrained type argument

-- | A dictionary argument; just an arbitrary Core expression.
type DictExpr = CoreExpr
-- | Keep only the calls in the set that satisfy the predicate.
ciSetFilter :: (CallInfo -> Bool) -> CallInfoSet -> CallInfoSet
ciSetFilter keep (CIS fn calls) = CIS fn (filterBag keep calls)
-- Debug rendering: the function Id hanging over its recorded calls.
-- (Local renamed from 'map' to avoid shadowing Prelude.map.)
instance Outputable CallInfoSet where
  ppr (CIS fn calls) = hang (text "CIS" <+> ppr fn) 2 (ppr calls)
-- | Render one call of a function: the Id followed by its type-argument
-- key (the dictionary args are omitted for brevity).
pprCallInfo :: Id -> CallInfo -> SDoc
pprCallInfo fn (CI { ci_key = key })
  = ppr fn <+> ppr key

-- | Render one entry of a 'CallKey': @_@ for an unconstrained type
-- argument, @\@ty@ for a fixed one.
ppr_call_key_ty :: Maybe Type -> SDoc
ppr_call_key_ty Nothing   = char '_'
ppr_call_key_ty (Just ty) = char '@' <+> pprParendType ty

instance Outputable CallKey where
  ppr (CallKey ts) = brackets (fsep (map ppr_call_key_ty ts))

instance Outputable CallInfo where
  ppr (CI { ci_key = key, ci_args = args, ci_fvs = fvs })
    = text "CI" <> braces (hsep [ ppr key, ppr args, ppr fvs ])
-- | Merge two 'CallDetails', unioning the call sets of any Id that
-- appears in both.
unionCalls :: CallDetails -> CallDetails -> CallDetails
unionCalls = plusDVarEnv_C unionCallInfoSet
-- | Union the calls recorded for a single Id.  Both arguments are
-- assumed to be for the same Id (the second Id is discarded).
unionCallInfoSet :: CallInfoSet -> CallInfoSet -> CallInfoSet
unionCallInfoSet (CIS f calls1) (CIS _ calls2)
  = CIS f (unionBags calls1 calls2)
-- | Free variables of every call recorded in a 'CallDetails'.
callDetailsFVs :: CallDetails -> VarSet
callDetailsFVs calls =
  nonDetFoldUDFM (unionVarSet . callInfoFVs) emptyVarSet calls
  -- It's OK to use nonDetFoldUDFM here because we forget the ordering
  -- immediately by converting to a nondeterministic set.

-- | Free variables of all the calls in one 'CallInfoSet'.
callInfoFVs :: CallInfoSet -> VarSet
callInfoFVs (CIS _ call_info) =
  foldrBag (\(CI { ci_fvs = fv }) vs -> unionVarSet fv vs) emptyVarSet call_info
------------------------------------------------------------
-- | A 'UsageDetails' recording exactly one call (of 'id' at the given
-- type and dictionary arguments) and no floated bindings.
singleCall :: Id -> [Maybe Type] -> [DictExpr] -> UsageDetails
singleCall id tys dicts
  = MkUD {ud_binds = emptyBag,
          ud_calls = unitDVarEnv id $ CIS id $
                     unitBag (CI { ci_key  = CallKey tys
                                 , ci_args = dicts
                                 , ci_fvs  = call_fvs }) }
  where
    call_fvs = exprsFreeVars dicts `unionVarSet` tys_fvs
    tys_fvs  = tyCoVarsOfTypes (catMaybes tys)
        -- The type args (tys) are guaranteed to be part of the dictionary
        -- types, because they are just the constrained types,
        -- and the dictionary is therefore sure to be bound
        -- inside the binding for any type variables free in the type;
        -- hence it's safe to neglect tyvars free in tys when making
        -- the free-var set for this call
        -- BUT I don't trust this reasoning; play safe and include tys_fvs
        --
        -- We don't include the 'id' itself.
-- | Record a call of @f@ at the given arguments as a candidate for
-- specialisation.  'mkCallUDs' is a trace-friendly wrapper; the real
-- work is in 'mkCallUDs''.
mkCallUDs, mkCallUDs' :: SpecEnv -> Id -> [CoreExpr] -> UsageDetails
mkCallUDs env f args
  = -- pprTrace "mkCallUDs" (vcat [ ppr f, ppr args, ppr res ])
    res
  where
    res = mkCallUDs' env f args
-- Returns emptyUDs unless the call is worth specialising:
-- f must be overloaded, locally-defined (or have an unfolding),
-- fully applied to its type and dictionary args, and at least one
-- dictionary argument must be "interesting".
mkCallUDs' env f args
  | not (want_calls_for f)  -- Imported from elsewhere
  || null theta             -- Not overloaded
  = emptyUDs

  |  not (all type_determines_value theta)
  || not (spec_tys `lengthIs` n_tyvars)
  || not ( dicts   `lengthIs` n_dicts)
  || not (any (interestingDict env) dicts)    -- Note [Interesting dictionary arguments]
  -- See also Note [Specialisations already covered]
  = -- pprTrace "mkCallUDs: discarding" _trace_doc
    emptyUDs    -- Not overloaded, or no specialisation wanted

  | otherwise
  = -- pprTrace "mkCallUDs: keeping" _trace_doc
    singleCall f spec_tys dicts
  where
    _trace_doc = vcat [ppr f, ppr args, ppr n_tyvars, ppr n_dicts
                      , ppr (map (interestingDict env) dicts)]

    (tyvars, theta, _) = tcSplitSigmaTy (idType f)
    constrained_tyvars = tyCoVarsOfTypes theta
    n_tyvars           = length tyvars
    n_dicts            = length theta

    -- Just ty for constrained tyvars; Nothing for unconstrained ones
    spec_tys = [mk_spec_ty tv ty | (tv, ty) <- tyvars `type_zip` args]
    dicts    = [dict_expr | (_, dict_expr) <- theta `zip` (drop n_tyvars args)]

    -- ignores Coercion arguments
    type_zip :: [TyVar] -> [CoreExpr] -> [(TyVar, Type)]
    type_zip tvs (Coercion _ : args)   = type_zip tvs args
    type_zip (tv:tvs) (Type ty : args) = (tv, ty) : type_zip tvs args
    type_zip _ _                       = []

    mk_spec_ty tyvar ty
      | tyvar `elemVarSet` constrained_tyvars = Just ty
      | otherwise                             = Nothing

    want_calls_for f = isLocalId f || isJust (maybeUnfoldingTemplate (realIdUnfolding f))
         -- For imported things, we gather call instances if
         -- there is an unfolding that we could in principle specialise
         -- We might still decide not to use it (consulting dflags)
         -- in specImports
         -- Use 'realIdUnfolding' to ignore the loop-breaker flag!

    type_determines_value pred    -- See Note [Type determines value]
        = case classifyPredType pred of
            ClassPred cls _ -> not (isIPClass cls)  -- Superclasses can't be IPs
            EqPred {}       -> True
            IrredPred {}    -> True   -- Things like (D []) where D is a
                                      -- Constraint-ranged family; Trac #7785
{-
Note [Type determines value]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Only specialise if all overloading is on non-IP *class* params,
because these are the ones whose *type* determines their *value*. In
particular, with implicit params, the type args *don't* say what the
value of the implicit param is! See Trac #7101
However, consider
type family D (v::*->*) :: Constraint
type instance D [] = ()
f :: D v => v Char -> Int
If we see a call (f "foo"), we'll pass a "dictionary"
() |> (g :: () ~ D [])
and it's good to specialise f at this dictionary.
So the question is: can an implicit parameter "hide inside" a
type-family constraint like (D a). Well, no. We don't allow
type instance D Maybe = ?x:Int
Hence the IrredPred case in type_determines_value.
See Trac #7785.
Note [Interesting dictionary arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
\a.\d:Eq a. let f = ... in ...(f d)...
There really is not much point in specialising f wrt the dictionary d,
because the code for the specialised f is not improved at all, because
d is lambda-bound. We simply get junk specialisations.
What is "interesting"? Just that it has *some* structure. But what about
variables?
* A variable might be imported, in which case its unfolding
will tell us whether it has useful structure
* Local variables are cloned on the way down (to avoid clashes when
we float dictionaries), and cloning drops the unfolding
(cloneIdBndr). Moreover, we make up some new bindings, and it's a
nuisance to give them unfoldings. So we keep track of the
"interesting" dictionaries as a VarSet in SpecEnv.
We have to take care to put any new interesting dictionary
bindings in the set.
We accidentally lost accurate tracking of local variables for a long
time, because cloned variables don't have unfoldings. But it makes a
massive difference in a few cases, eg Trac #5113. For nofib as a
whole it's only a small win: 2.2% improvement in allocation for ansi,
1.2% for bspt, but mostly 0.0! Average 0.1% increase in binary size.
-}
interestingDict :: SpecEnv -> CoreExpr -> Bool
-- A dictionary argument is interesting if it has *some* structure;
-- see Note [Interesting dictionary arguments]
-- NB: "dictionary" arguments include constraints of all sorts,
-- including equality constraints; hence the Coercion case
-- NB: clause order matters: the Type/Coercion/App cases must be tried
-- before the catch-all, which says everything else is interesting
interestingDict env (Var v) =  hasSomeUnfolding (idUnfolding v)
                            || isDataConWorkId v
                            || v `elemVarSet` se_interesting env
interestingDict _ (Type _)                = False
interestingDict _ (Coercion _)            = False
interestingDict env (App fn (Type _))     = interestingDict env fn
interestingDict env (App fn (Coercion _)) = interestingDict env fn
interestingDict env (Tick _ a)            = interestingDict env a
interestingDict env (Cast e _)            = interestingDict env e
interestingDict _ _                       = True
-- | Combine two 'UsageDetails'.  Binding order is preserved, so the
-- second argument's bindings may depend on the first's (see the
-- comment on 'ud_binds').
plusUDs :: UsageDetails -> UsageDetails -> UsageDetails
plusUDs uds1 uds2
  = MkUD { ud_binds = ud_binds uds1 `unionBags`  ud_binds uds2
         , ud_calls = ud_calls uds1 `unionCalls` ud_calls uds2 }
-----------------------------
-- | All binders of the bindings in a bag of 'DictBind's.
-- The leading underscore marks it as intentionally unused (kept for
-- the commented-out trace in 'callsForMe').
_dictBindBndrs :: Bag DictBind -> [Id]
_dictBindBndrs dbs = foldrBag (\(bind, _) ids -> bindersOf bind ++ ids) [] dbs
-- | Construct a 'DictBind' from a 'CoreBind', caching the binding's
-- free variables alongside it.
mkDB :: CoreBind -> DictBind
mkDB bind = (bind, fvs)
  where
    fvs = bind_fvs bind
-- | Identify the free variables of a 'CoreBind'
bind_fvs :: CoreBind -> VarSet
bind_fvs (NonRec bndr rhs) = pair_fvs (bndr,rhs)
bind_fvs (Rec prs)         = foldl delVarSet rhs_fvs bndrs
                             -- Delete the binders themselves: they are
                             -- not free in a recursive group
                           where
                             bndrs   = map fst prs
                             rhs_fvs = unionVarSets (map pair_fvs prs)

-- | Free variables of one (binder, rhs) pair, restricted to the
-- "interesting" variables: locals, plus DFunIds even when imported
pair_fvs :: (Id, CoreExpr) -> VarSet
pair_fvs (bndr, rhs) = exprSomeFreeVars interesting rhs
                       `unionVarSet` idFreeVars bndr
        -- idFreeVars: don't forget variables mentioned in
        -- the rules of the bndr.  C.f. OccAnal.addRuleUsage
        -- Also tyvars mentioned in its type; they may not appear
        -- in the RHS
        --      type T a = Int
        --      x :: T a = 3
  where
    interesting :: InterestingVarFun
    interesting v = isLocalVar v || (isId v && isDFunId v)
    -- Very important: include DFunIds /even/ if it is imported
    -- Reason: See Note [Avoiding loops], the second example
    --         involving an imported dfun.  We must know whether
    --         a dictionary binding depends on an imported dfun,
    --         in case we try to specialise that imported dfun
    --         Trac #13429 illustrates
-- | Flatten a set of "dumped" 'DictBind's, and some other binding
-- pairs, into a single recursive binding.  The cached free-variable
-- sets are unioned as we go.
recWithDumpedDicts :: [(Id,CoreExpr)] -> Bag DictBind -> DictBind
recWithDumpedDicts pairs dbs
  = (Rec all_pairs, all_fvs)
  where
    (all_pairs, all_fvs)
      = foldrBag flatten ([], emptyVarSet) (dbs `snocBag` mkDB (Rec pairs))

    -- Merge one dumped binding (with its cached fvs) into the
    -- accumulating pair-list and free-variable set
    flatten (bind, fvs') (prs, fvs)
      = case bind of
          NonRec b r -> ((b, r) : prs, fvs `unionVarSet` fvs')
          Rec prs1   -> (prs1 ++ prs,  fvs `unionVarSet` fvs')
snocDictBinds :: UsageDetails -> [DictBind] -> UsageDetails
-- Add ud_binds to the tail end of the bindings in uds
snocDictBinds uds dbs
  = uds { ud_binds = ud_binds uds `unionBags` listToBag dbs }

-- | Add a single binding at the front of the bindings in uds
consDictBind :: DictBind -> UsageDetails -> UsageDetails
consDictBind bind uds = uds { ud_binds = bind `consBag` ud_binds uds }

-- | Add several bindings at the front of the bindings in uds
addDictBinds :: [DictBind] -> UsageDetails -> UsageDetails
addDictBinds binds uds = uds { ud_binds = listToBag binds `unionBags` ud_binds uds }

-- | Add a single binding at the tail end of the bindings in uds
snocDictBind :: UsageDetails -> DictBind -> UsageDetails
snocDictBind uds bind = uds { ud_binds = ud_binds uds `snocBag` bind }
-- | Prepend the given dictionary bindings, in order, to a list of
-- top-level bindings (the cached free-var sets are discarded).
wrapDictBinds :: Bag DictBind -> [CoreBind] -> [CoreBind]
wrapDictBinds dbs binds = foldrBag prepend binds dbs
  where
    prepend (bind, _) rest = bind : rest
-- | Wrap an expression in 'Let's for each of the given dictionary
-- bindings, the first binding in the bag becoming the outermost Let.
wrapDictBindsE :: Bag DictBind -> CoreExpr -> CoreExpr
wrapDictBindsE dbs expr = foldrBag wrap expr dbs
  where
    wrap (bind, _) body = Let bind body
----------------------
dumpUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpUDs bndrs uds@(MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  | null bndrs = (uds, emptyBag)  -- Common in case alternatives
  | otherwise  = -- pprTrace "dumpUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
                 (free_uds, dump_dbs)
  where
    free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
    bndr_set = mkVarSet bndrs
    -- Split bindings into those free of bndrs and those (transitively)
    -- depending on them
    (free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
    free_calls = deleteCallsMentioning dump_set $   -- Drop calls mentioning bndr_set on the floor
                 deleteCallsFor bndrs orig_calls    -- Discard calls for bndr_set; there should be
                                                    -- no calls for any of the dicts in dump_dbs
dumpBindUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind, Bool)
-- Used at a let(rec) binding.
-- We return a boolean indicating whether the binding itself is
-- mentioned, directly or indirectly, by any of the ud_calls; in that
-- case we want to float the binding itself;
-- See Note [Floated dictionary bindings]
dumpBindUDs bndrs (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  = -- pprTrace "dumpBindUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
    (free_uds, dump_dbs, float_all)
  where
    free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
    bndr_set = mkVarSet bndrs
    (free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
    free_calls = deleteCallsFor bndrs orig_calls
    -- True <=> some surviving call mentions a dumped binder,
    -- so the binding itself must be floated
    float_all = dump_set `intersectsVarSet` callDetailsFVs free_calls
-- | Extract the calls recorded for 'fn', removing them from the
-- 'UsageDetails'.  Calls that (directly or indirectly) refer to 'fn'
-- itself are dropped; see Note [Avoiding loops].
callsForMe :: Id -> UsageDetails -> (UsageDetails, [CallInfo])
callsForMe fn (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  = -- pprTrace ("callsForMe")
    --          (vcat [ppr fn,
    --                 text "Orig dbs ="     <+> ppr (_dictBindBndrs orig_dbs),
    --                 text "Orig calls ="   <+> ppr orig_calls,
    --                 text "Dep set ="      <+> ppr dep_set,
    --                 text "Calls for me =" <+> ppr calls_for_me]) $
    (uds_without_me, calls_for_me)
  where
    uds_without_me = MkUD { ud_binds = orig_dbs
                          , ud_calls = delDVarEnv orig_calls fn }
    calls_for_me = case lookupDVarEnv orig_calls fn of
                        Nothing -> []
                        Just cis -> filterCalls cis orig_dbs
         -- filterCalls: drop calls that (directly or indirectly)
         -- refer to fn.  See Note [Avoiding loops]
----------------------
filterCalls :: CallInfoSet -> Bag DictBind -> [CallInfo]
-- See Note [Avoiding loops]
filterCalls (CIS fn call_bag) dbs
  = filter ok_call (bagToList call_bag)
  where
    -- dump_set: 'fn' plus the binders of every binding that
    -- (transitively) mentions it.  Relies on the Bag being in
    -- dependency order (see comment on ud_binds).
    dump_set = foldlBag go (unitVarSet fn) dbs
      -- This dump-set could also be computed by splitDictBinds
      --   (_,_,dump_set) = splitDictBinds dbs {fn}
      -- But this variant is shorter

    go so_far (db,fvs) | fvs `intersectsVarSet` so_far
                       = extendVarSetList so_far (bindersOf db)
                       | otherwise = so_far

    -- Keep a call only if it mentions nothing in the dump-set
    ok_call (CI { ci_fvs = fvs }) = not (fvs `intersectsVarSet` dump_set)
----------------------
splitDictBinds :: Bag DictBind -> IdSet -> (Bag DictBind, Bag DictBind, IdSet)
-- splitDictBinds dbs bndrs returns
--   (free_dbs, dump_dbs, dump_set)
-- where
--   * dump_dbs depends, transitively on bndrs
--   * free_dbs does not depend on bndrs
--   * dump_set = bndrs `union` bndrs(dump_dbs)
splitDictBinds dbs bndr_set
   = foldlBag split_db (emptyBag, emptyBag, bndr_set) dbs
                -- Important that it's foldl not foldr;
                -- we're accumulating the set of dumped ids in dump_set
   where
    split_db (free_dbs, dump_dbs, dump_idset) db@(bind, fvs)
        | dump_idset `intersectsVarSet` fvs     -- Dump it
        = (free_dbs, dump_dbs `snocBag` db,
           extendVarSetList dump_idset (bindersOf bind))

        | otherwise     -- Don't dump it
        = (free_dbs `snocBag` db, dump_dbs, dump_idset)
----------------------
-- | Remove every call that *mentions* any of the given variables in
-- its key or arguments (compare 'deleteCallsFor', which removes the
-- calls *for* a set of binders).
deleteCallsMentioning :: VarSet -> CallDetails -> CallDetails
deleteCallsMentioning bs calls = mapDVarEnv (ciSetFilter ok) calls
  where
    ok (CI { ci_fvs = fvs }) = not (fvs `intersectsVarSet` bs)
-- | Remove the calls *for* the given binders; calls merely
-- *mentioning* them are untouched.
deleteCallsFor :: [Id] -> CallDetails -> CallDetails
deleteCallsFor bs calls = calls `delDVarEnvList` bs
{-
************************************************************************
* *
\subsubsection{Boring helper functions}
* *
************************************************************************
-}
-- | The monad in which specialisation runs: a state monad carrying a
-- unique supply, the module being compiled, and the DynFlags.
newtype SpecM a = SpecM (State SpecState a)

data SpecState = SpecState {
                     spec_uniq_supply :: UniqSupply,  -- For fresh names
                     spec_module :: Module,           -- Module being specialised
                     spec_dflags :: DynFlags          -- Command-line flags
                 }
-- Functor and Applicative are derived from the Monad instance
instance Functor SpecM where
    fmap = liftM

instance Applicative SpecM where
    pure x = SpecM $ return x
    (<*>) = ap
instance Monad SpecM where
    -- Unwrap, bind in the underlying State monad, re-wrap
    SpecM x >>= f = SpecM $ do y <- x
                               case f y of
                                   SpecM z ->
                                       z
    fail = MonadFail.fail   -- delegate to the MonadFail instance

instance MonadFail.MonadFail SpecM where
    fail str = SpecM $ fail str   -- fail in the underlying State monad
instance MonadUnique SpecM where
    -- Split the stored supply, keeping one half in the state
    getUniqueSupplyM
        = SpecM $ do st <- get
                     let (us1, us2) = splitUniqSupply $ spec_uniq_supply st
                     put $ st { spec_uniq_supply = us2 }
                     return us1

    -- Take a single unique, storing the depleted supply back
    getUniqueM
        = SpecM $ do st <- get
                     let (u,us') = takeUniqFromSupply $ spec_uniq_supply st
                     put $ st { spec_uniq_supply = us' }
                     return u
-- Read the DynFlags out of the specialiser state
instance HasDynFlags SpecM where
    getDynFlags = SpecM $ liftM spec_dflags get

-- Read the current module out of the specialiser state
instance HasModule SpecM where
    getModule = SpecM $ liftM spec_module get
-- | Run a 'SpecM' computation inside 'CoreM', seeding it with a fresh
-- unique supply, the module being compiled, and the given DynFlags.
runSpecM :: DynFlags -> Module -> SpecM a -> CoreM a
runSpecM dflags this_mod (SpecM spec)
  = do { us <- getUniqueSupplyM
       ; let st0 = SpecState { spec_uniq_supply = us
                             , spec_module      = this_mod
                             , spec_dflags      = dflags }
       ; return (evalState spec st0) }
mapAndCombineSM :: (a -> SpecM (b, UsageDetails)) -> [a] -> SpecM ([b], UsageDetails)
mapAndCombineSM _ [] = return ([], emptyUDs)
mapAndCombineSM f (x:xs) = do (y, uds1) <- f x
(ys, uds2) <- mapAndCombineSM f xs
return (y:ys, uds1 `plusUDs` uds2)
-- | Extend the type substitution carried in the environment.
extendTvSubstList :: SpecEnv -> [(TyVar,Type)] -> SpecEnv
extendTvSubstList env prs
  = env { se_subst = CoreSubst.extendTvSubstList (se_subst env) prs }

-- | Apply the environment's substitution to a type.
substTy :: SpecEnv -> Type -> Type
substTy env = CoreSubst.substTy (se_subst env)

-- | Apply the environment's substitution to a coercion.
substCo :: SpecEnv -> Coercion -> Coercion
substCo env = CoreSubst.substCo (se_subst env)

-- | Substitute one binder, threading the updated substitution back
-- into the environment.
substBndr :: SpecEnv -> CoreBndr -> (SpecEnv, CoreBndr)
substBndr env b = case CoreSubst.substBndr (se_subst env) b of
                    (subst', b') -> (env { se_subst = subst' }, b')

-- | Substitute a list of binders; see 'substBndr'.
substBndrs :: SpecEnv -> [CoreBndr] -> (SpecEnv, [CoreBndr])
substBndrs env bndrs = case CoreSubst.substBndrs (se_subst env) bndrs of
                         (subst', bndrs') -> (env { se_subst = subst' }, bndrs')
cloneBindSM :: SpecEnv -> CoreBind -> SpecM (SpecEnv, SpecEnv, CoreBind)
-- Clone the binders of the bind; return new bind with the cloned binders
-- Return the substitution to use for RHSs, and the one to use for the body
cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (NonRec bndr rhs)
  = do { us <- getUniqueSupplyM
       ; let (subst', bndr') = CoreSubst.cloneIdBndr subst us bndr
             -- Record the cloned binder as "interesting" only when its
             -- RHS is a dictionary worth specialising on
             interesting' | interestingDict env rhs
                          = interesting `extendVarSet` bndr'
                          | otherwise = interesting
       -- First env (for the RHS) is unchanged: a NonRec binder does not
       -- scope over its own right-hand side
       ; return (env, env { se_subst = subst', se_interesting = interesting' }
                , NonRec bndr' rhs) }
cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (Rec pairs)
  = do { us <- getUniqueSupplyM
       ; let (subst', bndrs') = CoreSubst.cloneRecIdBndrs subst us (map fst pairs)
             env' = env { se_subst = subst'
                        , se_interesting = interesting `extendVarSetList`
                                           [ v | (v,r) <- pairs, interestingDict env r ] }
       -- Recursive binders scope over both RHSs and body: same env twice
       ; return (env', env', Rec (bndrs' `zip` map snd pairs)) }
newDictBndr :: SpecEnv -> CoreBndr -> SpecM CoreBndr
-- Make up completely fresh binders for the dictionaries
-- Their bindings are going to float outwards
newDictBndr env b = do { uniq <- getUniqueM
                       ; let n   = idName b
                             -- the type must be substituted, since the
                             -- original may mention specialised tyvars
                             ty' = substTy env (idType b)
                       ; return (mkUserLocalOrCoVar (nameOccName n) uniq ty' (getSrcSpan n)) }

newSpecIdSM :: Id -> Type -> Maybe JoinArity -> SpecM Id
-- Give the new Id a similar occurrence name to the old one
newSpecIdSM old_id new_ty join_arity_maybe
  = do { uniq <- getUniqueM
       ; let name    = idName old_id
             new_occ = mkSpecOcc (nameOccName name)
             -- preserve join-point-hood of the original, if any
             new_id  = mkUserLocalOrCoVar new_occ uniq new_ty (getSrcSpan name)
                         `asJoinId_maybe` join_arity_maybe
       ; return new_id }
{-
Old (but interesting) stuff about unboxed bindings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What should we do when a value is specialised to a *strict* unboxed value?
map_*_* f (x:xs) = let h = f x
t = map f xs
in h:t
Could convert let to case:
map_*_Int# f (x:xs) = case f x of h# ->
let t = map f xs
in h#:t
This may be undesirable since it forces evaluation here, but the value
may not be used in all branches of the body. In the general case this
transformation is impossible since the mutual recursion in a letrec
cannot be expressed as a case.
There is also a problem with top-level unboxed values, since our
implementation cannot handle unboxed values at the top level.
Solution: Lift the binding of the unboxed value and extract it when it
is used:
map_*_Int# f (x:xs) = let h = case (f x) of h# -> _Lift h#
t = map f xs
in case h of
_Lift h# -> h#:t
Now give it to the simplifier and the _Lifting will be optimised away.
The benefit is that we have given the specialised "unboxed" values a
very simple lifted semantics and then leave it up to the simplifier to
optimise it --- knowing that the overheads will be removed in nearly
all cases.
In particular, the value will only be evaluated in the branches of the
program which use it, rather than being forced at the point where the
value is bound. For example:
filtermap_*_* p f (x:xs)
= let h = f x
t = ...
in case p x of
True -> h:t
False -> t
==>
filtermap_*_Int# p f (x:xs)
= let h = case (f x) of h# -> _Lift h#
t = ...
in case p x of
True -> case h of _Lift h#
-> h#:t
False -> t
The binding for h can still be inlined in the one branch and the
_Lifting eliminated.
Question: When won't the _Lifting be eliminated?
Answer: When they are at the top-level (where it is necessary) or when
inlining would duplicate work (or possibly code depending on
options). However, the _Lifting will still be eliminated if the
strictness analyser deems the lifted binding strict.
-}
| ezyang/ghc | compiler/specialise/Specialise.hs | bsd-3-clause | 100,486 | 1 | 22 | 29,447 | 11,298 | 6,135 | 5,163 | -1 | -1 |
import Control.Monad hiding (filterM)
yes = flip mapM | mpickering/hlint-refactor | tests/examples/Default125.hs | bsd-3-clause | 54 | 0 | 5 | 8 | 20 | 11 | 9 | 2 | 1 |
module D1 where
-- NOTE(review): refactoring-test fixture (lifting 'pow' to the top
-- level). The code is intentionally mid-transformation: 'sq' is applied
-- to 'pow' here, while 'sq' below references a 'pow' that is only bound
-- in sumSquares' where-clause. It only makes sense as tool input/output.
sumSquares ((x : xs))
    = ((sq pow) x) + (sumSquares xs) where pow = 2
sumSquares [] = 0
sq x = x ^ pow
main = sumSquares [1 .. 4]
| SAdams601/HaRe | old/testing/liftToToplevel/D1AST.hs | bsd-3-clause | 154 | 0 | 9 | 43 | 87 | 47 | 40 | 6 | 1 |
{-# LANGUAGE RecursiveDo #-}
{-# LANGUAGE TemplateHaskell #-}
import Control.Lens
import Control.Monad
import qualified Data.Map as Map
import Reflex
import Reflex.Dom
-- | A single todo item: its text and whether it is done.
data Task = Task { _taskDescription :: String
                 , _taskCompleted :: Bool
                 }
    deriving Show

-- | Which subset of tasks the UI currently shows.
data Filter = All | Active | Completed deriving (Show, Eq)

-- | An operation a user can perform on an item: remove it, create one,
-- or transform it in place.
data UserEvent a = Delete | Create a | Edit (a -> a)

makeLenses ''Task
-- | Interpret a 'UserEvent' as a transformation of an optional value:
-- deletion yields Nothing, creation replaces, editing maps over it.
applyUserOp :: UserEvent a -> Maybe a -> Maybe a
applyUserOp Delete     _ = Nothing
applyUserOp (Create a) _ = Just a
applyUserOp (Edit f)   m = fmap f m

-- | A fresh, not-yet-completed task with the given description.
newTask :: String -> Task
newTask desc = Task desc False

-- | Five placeholder tasks, keyed 1..5.
initialTasks :: Map.Map Int Task
initialTasks = Map.fromList [ (n, newTask ("Task #" ++ show n)) | n <- [1..5] ]

-- | Does a task pass the given display filter?
satisfiesFilter :: Filter -> Task -> Bool
satisfiesFilter All       _ = True
satisfiesFilter Completed t = _taskCompleted t
satisfiesFilter Active    t = not (_taskCompleted t)
main = mainWidget app

-- | Apply a keyed batch of user operations to the task map.
updateWithMap = flip (Map.foldlWithKey applyUserOperation)
  where
    applyUserOperation accMap key op = Map.alter (applyUserOp op) key accMap

-- | Wire the whole UI together. The 'rec' block ties the knot between
-- the task map and the widgets that both render and mutate it.
app = do
  rec filterChange <- renderFilters [All, Active, Completed]
      -- "Toggle All" sets every task's completed flag to the button state
      toggleAll <- renderToggleAllButton >>= return . attachWith
                     (\taskMap toggleVal -> Map.map (const toggleVal) taskMap)
                     (current tasks)
      userEvents <- renderApp filteredTasks
      newTaskEvent <- renderNewTaskForm
      -- new ids start at 6, after the five initial tasks
      newTaskIds <- mapDyn (+6) =<< count newTaskEvent
      tasks <- foldDyn updateWithMap
                       initialTasks
                       $ mconcat [ userEvents
                                 , attachDynWith Map.singleton newTaskIds newTaskEvent
                                 , toggleAll
                                 ]
      activeFilterDyn <- holdDyn All filterChange
      filteredTasks <- combineDyn (Map.filter . satisfiesFilter) activeFilterDyn tasks
  return ()
-- | A button whose toggled state is emitted as an Edit event that sets
-- the completed flag of a task.
renderToggleAllButton :: MonadWidget t m => m (Event t (UserEvent Task))
renderToggleAllButton = do
  toggleButton <- button "Toggle All"
  return . fmap (Edit . set taskCompleted) . updated =<< toggle False toggleButton

-- | Render the task list; each row yields Delete/Edit events keyed by
-- the task's map key.
renderApp :: (Ord k, MonadWidget t m) => Dynamic t (Map.Map k Task) -> m (Event t (Map.Map k (UserEvent Task)))
renderApp dynTasks = do
  el "h1" $ text "Edit tasks"
  el "ul" $ do
    listViewWithKey dynTasks $ \k task -> do
      el "li" $ do
        dynText =<< mapDyn (view taskDescription) task
        checkboxChange <- checkboxView (constDyn mempty) =<< mapDyn (view taskCompleted) task
        deleteEvent <- button "Delete"
        let deleteEvents = fmap (const Delete) deleteEvent
        let editEvents = fmap (Edit . set taskCompleted) checkboxChange
        return $ leftmost [deleteEvents, editEvents]

-- | Text input plus a button; fires a Create event with the typed
-- description on each click.
renderNewTaskForm = do
  elAttr "label" ("for" =: "new-task-name") $ text "Task name: "
  t <- textInput (def & attributes .~ constDyn (Map.singleton "id" "new-task-name"))
  clickEvent <- button "Create task"
  dynCreates <- mapDyn (Create . newTask) (_textInput_value t)
  return $ tagDyn dynCreates clickEvent

-- | One button per filter; fires the clicked filter.
renderFilters :: MonadWidget t m => [Filter] -> m (Event t Filter)
renderFilters filters =
  el "ul" $ do
    filterEvents <- forM filters $ \filter -> do
      filterClick <- button (show filter)
      return $ fmap (const filter) filterClick
    return $ leftmost filterEvents
| Ninju/reflex-dom-demos | step-by-step-todo-list/stage-5-toggle-all/source.hs | mit | 3,655 | 0 | 23 | 1,063 | 1,117 | 544 | 573 | 73 | 1 |
{-# LANGUAGE
StandaloneDeriving, GeneralizedNewtypeDeriving,
ForeignFunctionInterface, MultiWayIf #-}
module Basic.Doub
( Doub (..)
, (%)
, module Data.Bits
)
where
import Data.Bits
import GHC.Read
import System.Random
import Text.Printf
-- TODO: Need to think about how to handle the fact that 0/0 is -inf in Basic.
-- Could just be a bug that I choose not to implement.
-- Interestingly, Chipmunk BASIC seems to implement all numeric types as IEEE
-- Doubles (and makes such a claim in the manual). For simplicity, this is the
-- only numeric type and is also used for Boolean values.
-- | BASIC's single numeric type: a newtype over IEEE Double.
newtype Doub = D {getDbl :: Double}
-- Let's get all the goodness of Doubles without making an orphan instance for
-- the Bits typeclass
deriving instance Eq Doub
deriving instance Enum Doub
deriving instance Floating Doub
deriving instance Fractional Doub
deriving instance Num Doub
deriving instance Ord Doub
deriving instance Real Doub
deriving instance RealFloat Doub
deriving instance RealFrac Doub
deriving instance Random Doub

-- Whole numbers print without a trailing ".0" (integer part only when
-- the fractional part is exactly 0).
-- NOTE(review): the precedence argument 'p' is ignored, so negative
-- values are never parenthesised -- presumably fine for BASIC-style
-- output; confirm.
instance Show Doub where
  showsPrec p (D d) =
    let (i, f) = properFraction d
    in if | f == 0 -> showString $ show i
          | otherwise -> showString $ show d

instance Read Doub where
  readPrec = D <$> readPrec

-- Bitwise operations delegate to the C helpers imported below.
instance Bits Doub where
  (.&.) = dblAnd
  (.|.) = dblOr
  xor = dblXor
  complement = dblCmp
  shiftL = dblShl
  shiftR = dblShr
  rotateL = dblRol
  rotateR = dblRor
  bitSize = const 64
  bitSizeMaybe = Just . const 64
  isSigned = const True
  bit = bitDefault
  testBit = testBitDefault
  popCount = popCountDefault
-- Since the Doub type is just a newtype wrapper for Double, it can be
-- marshalled just like Doubles without any extra effort. This isn't _really_
-- necessary, but it's fun to play around with.
-- C implementations of the bitwise/modulo operations on doubles.
foreign import ccall dblAnd :: Doub -> Doub -> Doub
foreign import ccall dblOr  :: Doub -> Doub -> Doub
foreign import ccall dblXor :: Doub -> Doub -> Doub
foreign import ccall dblCmp :: Doub -> Doub
foreign import ccall dblShl :: Doub -> Int -> Doub
foreign import ccall dblShr :: Doub -> Int -> Doub
foreign import ccall dblRol :: Doub -> Int -> Doub
foreign import ccall dblRor :: Doub -> Int -> Doub
foreign import ccall dblMod :: Doub -> Doub -> Doub

-- | BASIC's MOD operator, implemented in C (see dblMod above).
infixl 7 %
(%) :: Doub -> Doub -> Doub
(%) = dblMod
| dmringo/pure-basic | src/Basic/Doub.hs | mit | 2,424 | 0 | 12 | 597 | 528 | 297 | 231 | -1 | -1 |
{-# LANGUAGE ViewPatterns, MultiWayIf #-}
{-# OPTIONS_GHC -fno-warn-incomplete-patterns #-}
module Core.BPS
( preProcess
, pathfind
, astar
, fillPath
, Path (..)
) where
import Prelude hiding ((.), id)
import Core.Types
import Control.Monad
import Control.Monad.ST
import Data.Array.Unboxed
import Data.Array.ST
import Data.Bits
import Data.Maybe
import Data.Word
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Heap (Heap, Entry)
import qualified Data.Heap as Heap
-- | The adjacency bit for a direction. The bits sit two apart so each
-- direction's pair of hook bits can live alongside it in the word.
dirBit :: Dir4 -> Word16
dirBit d = case d of
  N4 -> bit 0
  E4 -> bit 2
  S4 -> bit 4
  W4 -> bit 6
-- | the @preProcess@ function pre-processes the level solidity array,
-- which is True when a tile is solid (or non-traversable) and False
-- when a tile is traversable. For each tile it encodes a 16-bit word
-- which encodes the adjacency information for that tile (i.e. whether
-- the tiles to the north, east, south and west are blocked), as well
-- as the /hooks/. The adjacency information is stored in the lowest 8
-- bits, while the hooks are in the upper bit. An example of a such a
-- Word16 would be:
--
-- > -----------------------------
-- > | w s e n | |
-- > | ac ac ac ac | w s e n |
-- > | 11 01 00 00 | 01 00 00 00 | = 0xD10 = 3344
-- > -----------------------------
-- > | hooks | adjacency |
-- > -----------------------------
--
-- The adjacency here is telling us that for the tile, the tile
-- directly west of it is solid. All the other tiles are free.
-- Calling the tile @x@ we can represent this as
--
-- > +---+---+---+
-- > |XXX| |XXX|
-- > +---+---+---+
-- > |XXX| | |
-- > +---+---+---+
-- > |XXX| n | |
-- > +---+---+---+
-- > |XXX| |XXX|
-- > +---+---+---+
--
-- Where XXX represents a solid tile. The hooks tell us when we enter
-- a tile from a direction, do we need to hook a left or a right turn
-- (anticlockwise and clockwise respectively). This is what the @a@
-- and @c@ bits mean for each direction.
--
-- When do we need to do this? Consider moving into this tile from m
-- in the east (going WEST):
--
-- > +---+---+---+
-- > |XXX|>=2|XXX|
-- > +---+---+---+
-- > |XXX| | 1 |
-- > +---+---+---+
-- > |XXX| n<==m |
-- > +---+---+---+
-- > |XXX| |XXX|
-- > +---+---+---+
--
-- We check the tiles northwards of n and m. For m the nearest
-- northward wall is 1 away, while for n the nearest wall is at least
-- two or greater away (it's not on the picture). Because 2 is
-- strictly greater than 1, we potentially need to turn clockwise to
-- explore this new space that's opened up to our north. We also need
-- to turn anticlockwise for the same reason, as more space has opened
-- up to our south. Essentially for every direction moving into every
-- square, we check whether the dungeon wall in the perpendicular
-- directions is increasing in distance, and if it is, we need to
-- explore that new space so we /hook/ a turn in that direction. This
-- isn't very useful here, as we will hit a wall and need to turn
-- anyway, but consider the following example:
--
-- > +---+---+---+
-- > |XXX| a |XXX|
-- > +---+---+---+
-- > |XXX| b | c |
-- > +---+---+---+
-- > |XXX| d | e |
-- > +---+---+---+
-- > |XXX| f |XXX|
-- > +---+---+---+
--
-- If we are going from f to d, we must hook a right (clockwise). If
-- we go from b to d we just continue onwards to f without trying to
-- explore e. This is fine, and won't impact our search, because we'll
-- explore e if necessary because we'll hook a left going from a to b,
-- reaching c and then exploring e if necessary from that direction
-- (should the square east of c be blocked). Essentially we use this
-- fact to exploit the symmetry of a grid map and massively reduce the
-- number of points we need to search compared to a naive A* search.
--
-- The clever bit of the encoding as a Word16 is that we can view as
-- square from a specific direction by bitshifting it by 2 * the
-- direction (where north is 0, east is 1, south is 2, and west is 3)
-- and then masking it with 0x301 (see below). If the result of this
-- is greater than 0 then we've either blocked or reached turning
-- point where we need to hook a left or right turn. This allows us to
-- avoid many expensive comparisons in the core of our pathfinding
-- loop.
--
-- > w `shiftR` (fromEnum d * 2) .&. 0x301
--
-- All my tests show that this algorithm always finds the optimal path
-- if it exists, but I haven't seen it in the literature so I can't
-- say this for sure. Overall what it's trying to do is exploit the
-- grid symmetries to prune the A* search tree, so assuming this
-- pruning is good (and I believe it is) we should be fine.
--
-- I'm calling it BPS or Box Point Search for now, because it's kinda
-- somewhat like JPS but it works with boxes.
preProcess :: UArray (Row, Col) Bool -> UArray (Row, Col) Word16
preProcess solid = runSTUArray $ do
  let (_, (maxR, maxC)) = bounds solid
  arr <- newArray ((0, 0), (maxR, maxC)) 0
  -- Solid tiles keep the initial encoding 0; only traversable tiles
  -- get their adjacency/hook word computed.
  forM_ (range ((0, 0), (maxR, maxC))) $ \p ->
    when (not (solid ! p)) $ writeArray arr p (bitfield solid p)
  return arr

-- | Adjacency bits OR'd with the hook bits of all four entry
-- directions; see the comment on 'preProcess' for the word layout.
bitfield :: UArray (Row, Col) Bool -> (Row, Col) -> Word16
bitfield s p = adjacency s p .|. foldr (.|.) 0 (map (turnPoint s p) [N4, E4, S4, W4])
-- | The low 8 bits: which of the four neighbouring tiles are solid.
adjacency :: UArray (Row, Col) Bool -> (Row, Col) -> Word16
adjacency solid p = test N4 .|. test E4 .|. test S4 .|. test W4
  where
    test d = if solid ! move4 d p then dirBit d else 0

-- | Distance to a wall in a direction.
-- NOTE(review): recurses until a solid tile is hit, so it assumes the
-- map is enclosed by walls; an open border would index out of bounds --
-- confirm maps are always bordered.
d2w :: UArray (Row, Col) Bool -> (Row, Col) -> Dir4 -> Int
d2w solid p d
  | solid ! p = 0
  | otherwise = 1 + d2w solid (move4 d p) d
-- | Hook bits for entering tile (r, c) travelling in direction d,
-- already shifted into position for that direction (bits 8..15).
-- A hook is set when the perpendicular wall distance grows as we step
-- into the tile, i.e. new space opens up that must be explored.
turnPoint :: UArray (Row, Col) Bool -> (Row, Col) -> Dir4 -> Word16
turnPoint solid (r, c) d = jp d `shiftL` (fromEnum d * 2)
  where
    -- Going south
    jp S4 = (d2w solid (r - 1, c - 1) W4) `lt8` (d2w solid (r, c - 1) W4) -- Hook clockwise
        .|. (d2w solid (r - 1, c + 1) E4) `lt9` (d2w solid (r, c + 1) E4) -- Hook anticlockwise
    -- Going west
    jp W4 = (d2w solid (r - 1, c + 1) N4) `lt8` (d2w solid (r - 1, c) N4) -- Hook clockwise
        .|. (d2w solid (r + 1, c + 1) S4) `lt9` (d2w solid (r + 1, c) S4) -- Hook anticlockwise
    -- Going north
    jp N4 = (d2w solid (r + 1, c + 1) E4) `lt8` (d2w solid (r, c + 1) E4) -- Hook clockwise
        .|. (d2w solid (r + 1, c - 1) W4) `lt9` (d2w solid (r, c - 1) W4) -- Hook anticlockwise
    -- Going east
    jp E4 = (d2w solid (r + 1, c - 1) S4) `lt8` (d2w solid (r + 1, c) S4) -- Hook clockwise
        .|. (d2w solid (r - 1, c - 1) N4) `lt9` (d2w solid (r - 1, c) N4) -- Hook anticlockwise

-- | bit 8 (clockwise-hook flag) when x < y, else 0.
lt8 :: Int -> Int -> Word16
lt8 x y | x < y = bit 8
        | otherwise = 0

-- | bit 9 (anticlockwise-hook flag) when x < y, else 0.
lt9 :: Int -> Int -> Word16
lt9 x y | x < y = bit 9
        | otherwise = 0
-- | d `into` w is nonzero if w is either a left or a right hook, or d
-- is blocked leaving w.
into :: Dir4 -> Word16 -> Word16
into d w = w `shiftR` (fromEnum d * 2) .&. 0x301

-- | Unblocked exit directions, excluding turning straight back the way
-- we came (the opposite of @from@ is masked off).
openFrom :: Dir4 -> Word16 -> [Dir4]
openFrom from w = filter (\d -> (w .|. dirBit (opposite4 from)) .&. dirBit d == 0) [N4, E4, S4, W4]

-- | All unblocked exit directions of a tile.
open :: Word16 -> [Dir4]
open w = filter (\d -> w .&. dirBit d == 0) [N4, E4, S4, W4]

-- | The two hook bits for entering in direction d:
-- 1 = clockwise, 2 = anticlockwise, 3 = both (see pathfind').
hook :: Dir4 -> Word16 -> Word16
hook d w = w `shiftR` (fromEnum d * 2 + 8) .&. 0x3

-- | A search path: cost so far, direction of the last step, and the
-- corner points visited, most recent first.
data Path = Path { distance :: Int, dir :: Dir4, nodes :: [(Row, Col)] } deriving Show
-- | Expand a corner-point path into a step-by-step path by inserting
-- the intermediate tiles between consecutive corners (corners share a
-- row or a column, so one coordinate is walked toward the other).
fillPath :: [(Row, Col)] -> [(Row, Col)]
fillPath (p1@(r1, c1) : p2@(r2, c2) : path)
  | manhattan p1 p2 == 1 = fillPath (p2 : path)
  | c1 < c2 = fillPath ((r1, c1 + 1) : p2 : path)
  | c2 < c1 = fillPath ((r1, c1 - 1) : p2 : path)
  | r1 < r2 = fillPath ((r1 + 1, c1) : p2 : path)
  | r2 < r1 = fillPath ((r1 - 1, c1) : p2 : path)
fillPath path = path

-- | Is the point on the same row (for N/S travel) or column (for E/W
-- travel) as the destination, i.e. would a straight walk cross it?
intersect :: Dir4 -> (Row, Col) -> (Row, Col) -> Bool
intersect d (r1, c1) (r2, c2)
  | d == N4 || d == S4 = r1 == r2
  | otherwise = c1 == c2

-- | Walk straight in direction d until we hit a blocked/hooked tile or
-- cross the destination's row/column; returns the stopping tile.
walk :: UArray (Row, Col) Word16 -> (Row, Col) -> (Row, Col) -> Dir4 -> (Row, Col)
walk solid dest p d
  | d `into` (solid ! move4 d p) > 0 || intersect d dest (move4 d p) = move4 d p
  | otherwise = walk solid dest (move4 d p) d
-- | Mutable closed-set: True once a tile has been expanded.
type Visited s = STUArray s (Row, Col) Bool

-- | Build a heap entry for extending @path@ from @from@ to @to@ in
-- direction d, priority = cost so far + manhattan heuristic.
-- Returns Nothing when @to@ was already visited.
entry :: Visited s
      -> Path
      -> (Row, Col)
      -> (Row, Col)
      -> Dir4
      -> (Row, Col)
      -> ST s (Maybe (Entry Int Path))
entry visited path dest from d to = do
  vis <- readArray visited to
  return $ if vis then Nothing else Just $
    Heap.Entry (distance path + manhattan from to + manhattan to dest)
               (Path (distance path + manhattan from to) d (to : nodes path))
-- | Core BPS loop: pop the best candidate, expand it along straight
-- walks, and enqueue continuations for blocked tiles and hook turns.
pathfind' :: UArray (Row, Col) Word16
          -> (Row, Col)
          -> Visited s
          -> Heap (Entry Int Path)
          -> ST s (Maybe Path)
pathfind' _ _ _ (Heap.viewMin -> Nothing) = return Nothing
pathfind' solid dest visited (Heap.viewMin -> Just (Heap.Entry priority path, heap))
  -- priority == cost means the heuristic is 0, i.e. we are at dest
  | priority == distance path = return $ Just path
  | otherwise = do
      let p = head (nodes path)
      vis <- readArray visited p
      if vis then pathfind' solid dest visited heap else do
        let w = solid ! p
            h = hook (dir path) w
            -- hook bits: 1 = clockwise, 2 = anticlockwise, 3 = both
            turns = if | h == 1 -> [clock (dir path)]
                       | h == 2 -> [anticlock (dir path)]
                       | h == 3 -> [clock (dir path), anticlock (dir path)]
                       | otherwise -> []
        writeArray visited p True
        if | w .&. dirBit (dir path) > 0 -> do
               -- blocked straight ahead: only the turns remain
               heap' <- newHeap turns p
               pathfind' solid dest visited $ heap `Heap.union` heap'
           | h > 0 -> do
               -- hook tile: continue straight and take the turns
               heap' <- newHeap (dir path: turns) p
               pathfind' solid dest visited $ heap `Heap.union` heap'
           | otherwise -> do
               -- plain tile: all open directions except doubling back
               heap' <- newHeap (openFrom (dir path) w) p
               pathfind' solid dest visited $ heap `Heap.union` heap'
  where
    newHeap dirs p =
      Heap.fromList . catMaybes <$> sequence (map (entry visited path dest p <*> walk solid dest p) dirs)

-- | Seed the heap with a straight walk in every open direction from
-- the start tile.
startHeap :: UArray (Row, Col) Word16 -> (Row, Col) -> (Row, Col) -> Heap (Entry Int Path)
startHeap solid start dest = Heap.fromList (startEntry <*> walk solid dest start <$> open (solid ! start))
  where
    startEntry d p = Heap.Entry (manhattan dest p + manhattan start p) (Path (manhattan start p) d [p, start])

-- | BPS over a pre-processed grid (see 'preProcess'); returns the
-- corner points of a shortest path in start-to-dest order, if any.
pathfind :: UArray (Row, Col) Word16 -> (Row, Col) -> (Row, Col) -> Maybe [(Row, Col)]
pathfind solid start dest = fmap (reverse . nodes) $ runST $ do
  visited <- newArray (bounds solid) False
  writeArray visited start True
  pathfind' solid dest visited (startHeap solid start dest)
-- Implementation of A*, for reference:

-- | Like 'entry' but with an immutable Set as the closed set.
entryA :: Set (Row, Col)
       -> Path
       -> (Row, Col)
       -> (Row, Col)
       -> Dir4
       -> (Row, Col)
       -> Maybe (Entry Int Path)
entryA visited path dest from d to
  | Set.member to visited = Nothing
  | otherwise = Just $
      Heap.Entry (distance path + manhattan from to + manhattan to dest)
                 (Path (distance path + manhattan from to) d (to : nodes path))

-- | Plain A* loop: expands one tile at a time (no straight walks).
astar' :: UArray (Row, Col) Bool
       -> (Row, Col)
       -> (Set (Row, Col), Heap (Entry Int Path))
       -> Maybe Path
astar' _ _ (_, Heap.viewMin -> Nothing) = Nothing
astar' solid dest (visited, Heap.viewMin -> Just (Heap.Entry priority path, heap))
  -- zero heuristic remaining: we have reached the destination
  | priority == distance path = Just path
  | otherwise =
      let p = head (nodes path)
          w = adjacency solid p
          newHeap dirs = Heap.fromList (mapMaybe (entryA visited path dest p <*> flip move4 p) dirs)
      in if Set.member p visited
           then astar' solid dest (visited, heap)
           else astar' solid dest (Set.insert p visited, heap `Heap.union` newHeap (openFrom (dir path) w))

-- | Seed the A* heap with the start tile's open neighbours.
astarStartHeap :: UArray (Row, Col) Bool -> (Row, Col) -> (Row, Col) -> Heap (Entry Int Path)
astarStartHeap solid start dest = Heap.fromList . map heapEntry $ open (adjacency solid start)
  where
    heapEntry d | p <- move4 d start = Heap.Entry (1 + manhattan p dest) (Path 1 d [p, start])

-- | Reference A* over the raw solidity grid (kept for comparison and
-- testing against 'pathfind').
astar :: UArray (Row, Col) Bool -> (Row, Col) -> (Row, Col) -> Maybe Path
astar solid start dest = astar' solid dest (Set.insert start Set.empty, astarStartHeap solid start dest)
| jameshsmith/HRL | Server/Core/BPS.hs | mit | 12,366 | 0 | 20 | 3,340 | 3,864 | 2,065 | 1,799 | 165 | 7 |
module Server where
import qualified Data.ByteString.UTF8 as UTF8
import qualified Data.ByteString as B
import qualified Data.ByteString.Lazy as BL
import Network.Socket hiding (send, sendTo, recv, recvFrom)
import Network.Socket.ByteString
import Network.DNS as DNS
type HandlerFunc = B.ByteString -> IO ()
-- | Bind a UDP socket on the given local port and feed every received
-- datagram (up to 1024 bytes) to the handler, looping forever.
serveLog :: String        -- service name or port number to bind
         -> HandlerFunc   -- called once per received datagram
         -> IO ()
serveLog port handlerFunc = withSocketsDo $
    do
       -- AI_PASSIVE: resolve a wildcard local address for binding
       addrinfos <- getAddrInfo
                    (Just (defaultHints {addrFlags = [AI_PASSIVE]}))
                    Nothing (Just port)
       let serveraddr = head addrinfos
       sock <- socket (addrFamily serveraddr) Datagram defaultProtocol
       bind sock (addrAddress serveraddr)
       procMessages sock
    where procMessages sock =
              do
                 -- sender address is currently ignored
                 (msg, addr) <- recvFrom sock 1024
                 handlerFunc msg
                 procMessages sock
-- | Print the decoded question domain of a DNS request, or the decode
-- error message if the packet cannot be parsed.
logHandler :: HandlerFunc
logHandler raw =
  case domainFromRequest raw of
    Left err     -> putStrLn err
    Right domain -> putStrLn (UTF8.toString domain)
-- | Extract the queried domain from a raw DNS request packet.
-- Returns 'Left' with a message when the packet cannot be decoded or
-- carries no question section. (Previously this called 'head' on the
-- question list and would crash on a question-less packet.)
domainFromRequest :: B.ByteString -> Either String Domain
domainFromRequest raw =
  case DNS.decode (BL.fromStrict raw) of
    Left err -> Left err
    Right packet ->
      case question packet of
        (q:_) -> Right (qname q)
        []    -> Left "DNS request contains no question section"
| markdrago/tweed | src/server.hs | mit | 1,302 | 0 | 15 | 368 | 377 | 193 | 184 | 35 | 2 |
-- Copyright © 2013 Julian Blake Kongslie <[email protected]>
-- Licensed under the MIT license.
{-# LANGUAGE RecordWildCards #-}
module Spec
where
import Control.Applicative
import qualified Data.Map as M
import System.FilePath
import Text.JSON
-- | Decode a JSON object into a Map keyed by its field names.
readMap :: (JSON v) => JSValue -> Result (M.Map String v)
readMap j = do
  al <- readJSON j
  return $ M.fromList $ fromJSObject al

-- | Encode a Map as a JSON object, one field per key.
showMap :: (JSON v) => M.Map String v -> JSValue
showMap m = JSObject $ toJSObject $ M.assocs $ M.map showJSON m
-- | Top-level solder spec: output location plus the mod packs, keyed
-- by pack slug.
data Spec = Spec
  { outputDir :: FilePath
  , outputURL :: FilePath
  , modPacks  :: M.Map String ModPack
  }

instance JSON Spec where
  -- NOTE(review): field lookups use M.!, so a missing "dir"/"url"/
  -- "packs" key throws instead of yielding a JSON 'Error' -- confirm
  -- inputs are always complete.
  readJSON j = do
    m <- readMap j
    Spec
      <$> readJSON (m M.! "dir")
      <*> readJSON (m M.! "url")
      <*> readMap  (m M.! "packs")
  showJSON (Spec {..}) = showMap $ M.fromList
    [ ("dir", showJSON outputDir)
    , ("url", showJSON outputURL)
    , ("packs", showMap modPacks)
    ]
-- | One mod pack: display assets plus its versions, keyed by version
-- string, and which versions are recommended/latest.
data ModPack = ModPack
  { niceName   :: String
  , background :: FilePath
  , icon       :: FilePath
  , logo       :: FilePath
  , versions   :: M.Map String ModPackVersion
  , recVersion :: String
  , newVersion :: String
  }

instance JSON ModPack where
  -- Uses M.! like the Spec instance: missing keys throw.
  readJSON j = do
    m <- readMap j
    ModPack
      <$> readJSON (m M.! "name")
      <*> readJSON (m M.! "background")
      <*> readJSON (m M.! "icon")
      <*> readJSON (m M.! "logo")
      <*> readMap  (m M.! "versions")
      <*> readJSON (m M.! "recommended")
      <*> readJSON (m M.! "latest")
  showJSON (ModPack {..}) = showMap $ M.fromList
    [ ("name", showJSON niceName)
    , ("background", showJSON background)
    , ("icon", showJSON icon)
    , ("logo", showJSON logo)
    , ("versions", showMap versions)
    , ("recommended", showJSON recVersion)
    , ("latest", showJSON newVersion)
    ]
-- | One released version of a pack: the Minecraft version/jar it
-- targets and its mods, keyed by mod slug.
data ModPackVersion = ModPackVersion
  { minecraftVersion :: String
  , minecraftJar     :: FilePath
  , mods             :: M.Map String ModVersion
  }

instance JSON ModPackVersion where
  -- Uses M.! like the Spec instance: missing keys throw.
  readJSON j = do
    m <- readMap j
    ModPackVersion
      <$> readJSON (m M.! "minecraft")
      <*> readJSON (m M.! "minecraftJar")
      <*> readMap  (m M.! "mods")
  showJSON (ModPackVersion {..}) = showMap $ M.fromList
    [ ("minecraft", showJSON minecraftVersion)
    , ("minecraftJar", showJSON minecraftJar)
    , ("mods", showMap mods)
    ]
-- | A single mod at a single version, and the zip that contains it.
data ModVersion = ModVersion
  { version :: String
  , zipFile :: FilePath
  }

instance JSON ModVersion where
  -- Uses M.! like the Spec instance: missing keys throw.
  readJSON j = do
    m <- readMap j
    ModVersion
      <$> readJSON (m M.! "version")
      <*> readJSON (m M.! "zip")
  showJSON (ModVersion {..}) = showMap $ M.fromList
    [ ("version", showJSON version)
    , ("zip", showJSON zipFile)
    ]
| jblake/solderapi | src/Spec.hs | mit | 2,782 | 0 | 17 | 751 | 936 | 497 | 439 | 81 | 1 |
-- author : Lukasz Wolochowski ([email protected])
module RelationalStructureTest (tests) where
import Test.Tasty
import Test.QuickCheck.Instances
import Test.QuickCheck.Modifiers
import Test.Tasty.QuickCheck as QC
import Test.Tasty.HUnit
import Data.Set(Set)
import qualified Data.Set as Set
import qualified Data.Map as Map
import Debug.Trace
import RelationalStructure
import Utils
import UtilsTest hiding (tests)
-- | Generate a relation constructor: name, arity and membership
-- predicate are random, the element set is supplied later.
genFun :: (Arbitrary rname, Arbitrary element, CoArbitrary element, Ord element) => Gen (Set element -> Relation rname element)
genFun = do
  rname <- arbitrary
  ar <- arbitrary
  f <- arbitrary
  return (\elts -> createRelation rname ar elts f)

instance (Arbitrary rname, Arbitrary element, CoArbitrary element, Ord element) => Arbitrary (Relation rname element) where
  arbitrary = do
    f <- genFun
    -- small universes keep tests fast
    elts <- arbitrary `suchThat` (\s -> Set.size s < 10)
    return (f elts)
  shrink _ = []

instance (Arbitrary rname, Arbitrary element, CoArbitrary element, Ord element, Ord rname) => Arbitrary (Structure rname element) where
  arbitrary = do
    elts <- arbitrary `suchThat` (\s -> Set.size s < 10)
    relNum <- QC.elements [1,2,3,4,5]
    fs <- QC.vectorOf relNum genFun
    -- all relations share the same element universe
    let rels = map (\f -> f elts) fs
    let sig = sigFromRels rels
    return (createStructure sig elts rels)
-- | Well-formedness of a relation: every tuple has the declared arity
-- and mentions only elements of the given universe.
checkRelation :: (Ord element) => Set element -> Relation rname element -> Bool
checkRelation elts (Relation (_, ar, tuples)) =
  all check_tuple (Set.toList tuples)
  where
    check_tuple (Tuple t) =
      (arity (Tuple t) == ar) && (all (\e -> Set.member e elts) t)

-- | Well-formedness of a structure: signature and relation map agree
-- on names/arities and every relation is itself well-formed.
checkStructure :: (Ord rname, Ord element) => Structure rname element -> Bool
checkStructure (Structure (Signature sigMap, elts, relMap)) =
  (Map.keysSet sigMap == Map.keysSet relMap)
  && (all
       (\ (rname, Relation (rname', ar, tuples)) ->
         (ar == relationArity (Signature sigMap) rname)
         && checkRelation elts (Relation (rname', ar, tuples))
       )
       (Map.toList relMap))
tests :: TestTree
tests = testGroup "RelationalStructure" [testRelation, testStructure, testAutomorphism, testSubstructure, testPowerStructure]

-- Generated relations are well-formed for their universe.
testRelation = QC.testProperty "check relation"
  (forAll (genFun :: Gen (Set Int -> Relation Char Int)) (\f elts -> Set.size elts < 10 QC.==> checkRelation elts (f elts)))

-- Generated structures are well-formed.
testStructure = QC.testProperty "check structure" (checkStructure :: Structure Char Int -> Bool)

-- The identity is always a homomorphism of a structure to itself.
testAutomorphism = QC.testProperty "check automorphism" (\(str :: Structure Char Int) -> isHomomorphism str str id)

-- Inclusion of an induced substructure is a homomorphism.
testSubstructure =
  QC.testProperty "check automorphism"
    (\(fsub :: Int -> Bool) (str :: Structure Char Int) ->
      let substr = substructure str (Set.filter fsub $ structureElems str)
      in isHomomorphism substr str id)

-- Projections out of, and the diagonal into, the square power are
-- homomorphisms (universe capped for speed).
testPowerStructure =
  QC.testProperty "power structure"
    (\(str :: Structure Char Int) ->
      Set.size (structureElems str) < 5 QC.==>
        let p = structPower str 2 in
          isHomomorphism p str (\(Tuple [e, _]) -> e)
          && isHomomorphism p str (\(Tuple [e, _]) -> e)
          && isHomomorphism str p (\e -> Tuple [e, e]))
module Main where
import Control.Monad
import Data.Maybe
import System.Directory
import System.Environment
import System.Exit
import System.FilePath.Posix
import qualified Config
import System.Tmux
parseInt x = read x :: Int
-- Workspace management actions.

-- | Indices of the windows currently linked into the "workspace"
-- session, or Nothing when the session does not exist.
workspaceWindows :: IO (Maybe [Int])
workspaceWindows =
  do windows <- listWindows (Target "workspace")
     return $ fmap (parseInt . snd . head)) windows
-- Tmux configuration actions.

-- | Create a detached ("-d") tmux session with extra options.
headlessSession options = newSession $ [Flag "d"] ++ options

-- | Run a configured command in the given tmux target.
setupCommand target command = runCommand (Config.command command) target

-- | Split a new pane (cwd = root path + command's relative path) and
-- start the command in the freshly created pane.
addCommandWindow rootPath session pane command =
  do let path = rootPath </> (fromMaybe "" $ Config.path command)
     splitWindow [Parameter "c" path] $ paneTarget session 0 pane
     setupCommand (paneTarget session 0 $ pane + 1) command
-- | Ensure a process's tmux session exists (creating and populating it
-- on first run) and link its window into the workspace session.
-- NOTE(review): relies on two partial patterns -- @(first:rest)@
-- assumes at least one configured command, and @Just nextWindow <-@
-- assumes the workspace session exists; both crash otherwise. Confirm
-- config validation guarantees this.
setupFromConfig (Config.Process rootPath cfg) =
  do let windowName = Config.name cfg
     let firstPath = rootPath </> (fromMaybe "" $ Config.rootPath cfg)
     created <- fmap isJust $
       headlessSession [Parameter "c" firstPath] (Source windowName)
     -- When initializing a process, setup each of its commands in its own split,
     -- straighten out layout, and set the window name.
     when created
       (do let (first:rest) = Config.commands cfg
           setupCommand (Target windowName) first
           mapM (uncurry $ addCommandWindow firstPath windowName) $ zip [0..] rest
           setVerticalLayout $ Target windowName
           void $ renameWindow windowName $ Target windowName)
     -- Link the window into the workspace.
     Just nextWindow <- nextWorkspaceWindow
     linkWindow (windowSource windowName 0) nextWindow
setupWorkspace rootPath cfgs =
  -- Make or reuse a workspace then link in all of the needed processes.
  do created <- fmap isJust $ headlessSession [Parameter "c" rootPath] (Source "workspace")
     -- reusing an existing workspace: drop stale links first
     unless created unlinkWorkspaceWindows
     void $ mapM setupFromConfig cfgs

-- | Resolve the project root from the CLI arguments (cwd when absent).
-- NOTE(review): partial -- more than one argument hits no equation and
-- crashes; confirm that is acceptable for this CLI.
getRoot [] = getCurrentDirectory
getRoot [path] = canonicalizePath path
-- | Entry point: resolve the root, load the process configs, build the
-- workspace and attach to it.
main =
  do rootPath <- getArgs >>= getRoot
     -- Crawl the selected process tree and ensure that there are processes to be run.
     ps <- Config.getProcesses' rootPath
     when (null ps)
       (do putStrLn "Invalid config file."
           exitFailure)
     -- Run and link all processes then exit to the workspace.
     setupWorkspace rootPath ps
     attachSession (Target "workspace")
| mattneary/tux | src/Main.hs | mit | 2,812 | 0 | 15 | 587 | 769 | 368 | 401 | 55 | 1 |
module TestImport
( module TestImport
, module X
) where
import Application (makeFoundation)
import ClassyPrelude as X
import Database.Persist as X hiding (get)
import Database.Persist.Sql (SqlPersistM, SqlBackend, runSqlPersistMPool, rawExecute, rawSql, unSingle, connEscapeName)
import Foundation as X
import Model as X
import Test.Hspec as X
import Yesod.Default.Config2 (ignoreEnv, loadAppSettings)
import Yesod.Test as X
-- | Run a database action against the 'App' owned by the current
-- Yesod test example.
runDB :: SqlPersistM a -> YesodExample App a
runDB query = getTestYesod >>= \site -> liftIO (runDBWithApp site query)
-- | Run a database action using the given application's connection pool.
runDBWithApp :: App -> SqlPersistM a -> IO a
runDBWithApp app query =
    let pool = appConnPool app
    in runSqlPersistMPool query pool
-- | Build a fresh foundation (with a wiped database) before every spec.
withApp :: SpecWith App -> Spec
withApp = before buildApp
  where
    -- test-settings.yml takes precedence over settings.yml; the
    -- environment is deliberately ignored so tests are reproducible.
    buildApp = do
        settings <- loadAppSettings
            ["config/test-settings.yml", "config/settings.yml"]
            []
            ignoreEnv
        foundation <- makeFoundation settings
        wipeDB foundation
        return foundation
-- This function will truncate all of the tables in your database.
-- 'withApp' calls it before each test, creating a clean environment for each
-- spec to run in.
wipeDB :: App -> IO ()
wipeDB app = runDBWithApp app $ do
    tables <- getTables
    -- 'ask' gives us the SqlBackend so we can escape identifiers the way
    -- this connection expects.
    sqlBackend <- ask
    -- NOTE: (++) here is ClassyPrelude's monoid append on Text.
    let queries = map (\t -> "TRUNCATE TABLE " ++ connEscapeName sqlBackend (DBName t)) tables
    -- In MySQL, a table cannot be truncated if another table references it via foreign key.
    -- Since we're wiping both the parent and child tables, though, it's safe
    -- to temporarily disable this check.
    rawExecute "SET foreign_key_checks = 0;" []
    forM_ queries (\q -> rawExecute q [])
    rawExecute "SET foreign_key_checks = 1;" []
-- | List every table name in the current database (MySQL's SHOW TABLES).
getTables :: MonadIO m => ReaderT SqlBackend m [Text]
getTables = fmap (map unSingle) (rawSql "SHOW TABLES;" [])
| isankadn/yesod-testweb-full | test/TestImport.hs | mit | 1,869 | 0 | 17 | 440 | 438 | 231 | 207 | 39 | 1 |
-- This module controls the parameters that the analysis accepts from a user.
-- In the near future, this will be replaced by a higher level DSL.
{-# LANGUAGE OverloadedStrings #-}
module Skel.MVP.UserParameters
( module Skel.MVP.UserParameters
, module Skel.MVP.UserModel
, module DataAnalysis.Application.Import
, MvpParams(..)
) where
import qualified Data.Text as T
import DataAnalysis.Application.Import
import Skel.MVP.UserModel
-- | Parameters to the analysis: an optional date window.
data MvpParams = MvpParams
    { paramsFrom :: Maybe Day -- ^ optional "From" date; 'Nothing' means no lower bound — TODO confirm inclusivity
    , paramsTo   :: Maybe Day -- ^ optional "To" date; 'Nothing' means no upper bound — TODO confirm inclusivity
    }
-- | Make a form for the parameters, uses the 'Default' instance for
-- the default values.
instance HasForm MvpParams where
    form = MvpParams <$> date "From" <*> date "To"
      -- 'date' builds a required text field that round-trips a Day:
      -- parsing user text with parseDate and rendering with show.
      -- The result is wrapped in Just because the params are Maybe Day.
      where date label =
              fmap Just
                   (areq (checkMMap (return . parseDate . T.unpack)
                                    (T.pack . show)
                                    textField)
                         label
                         Nothing)
| teuffy/min-var-ci | src/Skel/MVP/UserParameters.hs | mit | 1,053 | 0 | 15 | 339 | 174 | 103 | 71 | 21 | 0 |
-- | Sum of a list, as a left fold with an explicit lambda.
sum' :: (Num a) => [a] -> a
sum' = foldl (\acc y -> acc + y) 0
-- | Largest element of a non-empty list (errors on []).
maximum' :: (Ord a) => [a] -> a
maximum' []     = error "maximum': empty list"
maximum' (x:xs) = go x xs
  where
    go best []     = best
    go best (y:ys) = go (max best y) ys
-- | Reverse a list with an explicit accumulator — the hand-unrolled
-- form of foldl (flip (:)) [].
reverse' :: [a] -> [a]
reverse' = go []
  where
    go acc []     = acc
    go acc (y:ys) = go (y : acc) ys
-- | Membership test. Folding from the right with (||) lets the scan
-- stop at the first match, so this also terminates on infinite lists
-- that contain the element — the original strict left fold always
-- walked the whole list. Also replaces the boolean-blind
-- @if … then True else acc@ with a plain boolean expression.
elem' :: (Eq a) => a -> [a] -> Bool
elem' x = foldr (\y acc -> y == x || acc) False
-- | Map as a right fold, composing the cons with the function.
map' :: (a -> b) -> [a] -> [b]
map' f = foldr ((:) . f) []
-- | Keep the elements satisfying the predicate, by explicit recursion
-- with guards.
filter' :: (a -> Bool) -> [a] -> [a]
filter' p = go
  where
    go [] = []
    go (y:ys)
      | p y       = y : go ys
      | otherwise = go ys
-- | Product of a non-empty list, accumulating left to right
-- (errors on []).
product' :: (Num a) => [a] -> a
product' []     = error "product': empty list"
product' (x:xs) = go x xs
  where
    go acc []     = acc
    go acc (y:ys) = go (acc * y) ys
-- | First element of a non-empty list (errors on []); works on
-- infinite lists since only the head is demanded.
head' :: [a] -> a
head' (x:_) = x
head' []    = error "head': empty list"
-- | Last element of a non-empty list (errors on []).
last' :: [a] -> a
last' [x]    = x
last' (_:xs) = last' xs
last' []     = error "last': empty list"
| pradyuman/haskell | fold.hs | mit | 687 | 0 | 9 | 180 | 376 | 210 | 166 | 18 | 2 |
module Types.Wall(
Wall(..),
innards,
randWall
) where
import Control.Applicative
import Control.Monad(liftM)
import Data.Universe
import Data.Default
import Types.Size
import Types.Jewel
import Types.Item
import Types.Consumable
import Random.Probability
-- | A wall tile. Most walls are plain; some embed a treasure, and
-- 'Concrete' cannot be destroyed at all.
data Wall = NormalWall                  -- plain, destructible wall
          | GoldWall Size               -- gold chunk of the given size inside
          | JewelWall Jewel             -- jewel embedded inside
          | ItemWall Item               -- item embedded inside
          | ConsumableWall Consumable   -- consumable embedded inside
          | Concrete -- indestructable
  deriving Eq
-- | Human-readable description of a wall and whatever it contains.
instance Show Wall where
  show NormalWall = "a regular old wall"
  show Concrete   = "an indestructable concrete wall"
  show wall       = concat ["a wall with ", innards wall, " in it"]
-- | Walls default to a plain 'NormalWall'.
instance Default Wall where
  def = NormalWall
-- | Enumerate every possible wall: the two nullary constructors plus
-- one wall per inhabitant of each payload type's own universe.
instance Universe Wall where
  universe = ([NormalWall, Concrete] ++ ) $ concat
      [GoldWall <$> sizes,
       JewelWall <$> js ,
       ItemWall <$> items,
       ConsumableWall <$> cs ]
    where sizes = universe :: [Size]
          js    = universe :: [Jewel]
          items = universe :: [Item]
          cs    = universe :: [Consumable]
-- | Noun phrase for whatever is embedded in a wall ("" when the wall
-- contains nothing). Previously non-exhaustive: 'Concrete' had no
-- clause and crashed at runtime; it now reports nothing inside, which
-- matches the 'Show' instance never asking for Concrete's innards.
innards :: Wall -> String
innards NormalWall         = ""
innards Concrete           = ""
innards (GoldWall s)       = "a " ++ show s ++ " chunk of gold"
innards (JewelWall j)      = "a " ++ show j
innards (ItemWall i)       = show i
innards (ConsumableWall c) = "a " ++ show c
-- mostly normal and gold walls, no concrete randomly
-- | Random wall weighted 30:10:5:1:2 (normal : gold : jewel : item :
-- consumable); Concrete is deliberately never generated.
-- '>>= id' joins the randomly-chosen action (equivalent to 'join').
randWall :: MonadRandom m => m Wall
randWall = fromList [ (return NormalWall, 30)
                    , (liftM GoldWall randSize, 10)
                    , (liftM JewelWall randJewel, 5)
                    , (liftM ItemWall randItem, 1)
                    , (liftM ConsumableWall randConsumable, 2) ] >>= id
| 5outh/textlunky | src/Types/Wall.hs | mit | 1,797 | 0 | 9 | 635 | 462 | 257 | 205 | 48 | 1 |
module Main where
import Control.Monad (unless)
import System.IO (hFlush, stdout)
import System.Random
import Text.Read (readMaybe)
main :: IO ()
main = do
  gen <- getStdGen
  askForNum gen
  where
    -- Ask the player to guess the secret number; an empty line quits.
    askForNum :: StdGen -> IO ()
    askForNum generator = do
      let (number, newGen) = randomR (1, 10) generator :: (Int, StdGen)
      putStr "Which number in range 1 to 10 am I thinking of ? "
      hFlush stdout
      numberString <- getLine
      unless (null numberString) $ do
        -- readMaybe instead of read: non-numeric input no longer
        -- crashes the game with a parse exception.
        case readMaybe numberString :: Maybe Int of
          Just guess
            | guess == number -> putStrLn "You are correct!"
            | otherwise       -> putStrLn ("Sorry, it was " ++ show number)
          Nothing -> putStrLn "Please enter a number."
        -- Every answered round starts a fresh one with a new number.
        askForNum newGen | Forec/learn | 2017.1/joking-haskell/joking4.hs | mit | 649 | 0 | 16 | 191 | 201 | 101 | 100 | 19 | 2
module Language.Dash.VM.VM (
execute
, getVMHeapArray
, getVMHeapValue
) where
import Data.Word
import Foreign.C
import Foreign.Storable
import Foreign.Marshal.Array
import Foreign.Ptr
import Language.Dash.IR.Data (SymbolNameList)
import Language.Dash.VM.Types
-- TODO change order in return value! (sym names and const table)
-- | Marshal the program and constant table into temporary C arrays
-- (freed automatically by 'withArray'), run the VM over them, and
-- return the VM's result word along with the (unchanged) constant
-- table and symbol names.
execute :: [VMWord] -> [VMWord] -> SymbolNameList -> IO (VMWord, [VMWord], SymbolNameList)
execute prog ctable symNames =
  withArray (map CUInt prog) (\progPtr ->
    withArray (map CUInt ctable) (\ctablePtr ->
      foreignVMExecute progPtr
                       (fromIntegral $ length prog)
                       ctablePtr (fromIntegral $ length ctable)
      ))
  >>= \a ->
  return (a, ctable, symNames)
-- | Read the single heap word stored at the given VM heap address.
getVMHeapValue :: VMWord -> IO VMWord
getVMHeapValue addr = peek (foreignVMGetHeapPointer addr)
-- | Read @len@ consecutive heap words starting at the given VM heap
-- address.
getVMHeapArray :: VMWord -> Int -> IO [VMWord]
getVMHeapArray addr len = peekArray len (foreignVMGetHeapPointer addr)
-- This will not call back into Haskell, so we can mark it unsafe
foreign import ccall unsafe "vm_execute" foreignVMExecute
  :: Ptr CUInt -> CInt -> Ptr CUInt -> CInt -> IO Word32
-- Pure because the C side returns a raw pointer into the VM heap;
-- NOTE(review): the pointer is only meaningful after vm_execute has
-- run — confirm lifetime guarantees on the C side.
foreign import ccall unsafe "vm_get_heap_pointer" foreignVMGetHeapPointer
  :: Word32 -> Ptr VMWord
| arne-schroppe/dash | src/Language/Dash/VM/VM.hs | mit | 1,353 | 0 | 15 | 329 | 355 | 190 | 165 | 32 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.