code
stringlengths 5
1.03M
| repo_name
stringlengths 5
90
| path
stringlengths 4
158
| license
stringclasses 15
values | size
int64 5
1.03M
| n_ast_errors
int64 0
53.9k
| ast_max_depth
int64 2
4.17k
| n_whitespaces
int64 0
365k
| n_ast_nodes
int64 3
317k
| n_ast_terminals
int64 1
171k
| n_ast_nonterminals
int64 1
146k
| loc
int64 -1
37.3k
| cycloplexity
int64 -1
1.31k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|
-- | Representation of Peano numbers: a value is either 'Zero' or the
-- successor of another Peano number; its magnitude is the number of
-- 'Succ' constructors.
data Pint = Zero | Succ Pint
    deriving (Eq, Show)
-- | Walks the entire structure to check that a 'Pint' is a finite,
-- fully defined Peano number (diverges on an infinite chain of 'Succ').
istZahl :: Pint -> Bool
istZahl Zero     = True
istZahl (Succ y) = istZahl y
-- | Checks that a signed Peano number ('PintAll') is fully defined.
-- The original implementation cased on the sign flag with two identical
-- branches; 'seq' keeps the same strictness on the flag (it is still
-- forced to WHNF) while removing the duplicated code, then the
-- magnitude is validated with 'istZahl'.
istZahlAll :: PintAll -> Bool
istZahlAll (x,y) = seq x (istZahl y)
-- | Addition on Peano numbers. Both operands are validated with
-- 'istZahl' first; invalid input diverges via 'bot'.
peanoPlus :: Pint -> Pint -> Pint
peanoPlus a b
  | istZahl a && istZahl b = go a b
  | otherwise              = bot
  where
    -- Structural recursion on the first operand.
    go Zero     n = n
    go (Succ m) n = Succ (go m n)
-- If it is a PintAll
-- Check if the first PintAll is positive or negative
-- Then check the second one.... if both are positive or both are negative use peanoPlus and set the Boolean accordingly
-- If one is positive and one is negative find out the bigger value and subtract the numbers by using Integers
-- | Addition on signed Peano numbers; diverges ('bot') on invalid input.
-- Operands with the same sign add their magnitudes directly via
-- 'peanoPlus'. Mixed-sign operands fall back to Integer subtraction,
-- taking the result's sign from the operand with the larger magnitude.
peanoPlusAll :: PintAll -> PintAll -> PintAll
peanoPlusAll (v, w) (x, y)
  | istZahlAll (v, w) && istZahlAll (x, y) = combine
  | otherwise                              = bot
  where
    combine
      | v == x    = (v, peanoPlus w y)          -- same sign: add magnitudes
      | v         = (peanoLeq y w, difference)  -- positive plus negative
      | otherwise = (peanoLeq w y, difference)  -- negative plus positive
    -- Magnitude of the mixed-sign result, computed over the Integers.
    difference = intToPeano (peanoToInt w - peanoToInt y)
--test:: PintAll -> PintAll -> Integer
--test (v,w) (x,y) = seq x (peanoToInt(w)-peanoToInt(y))
-- | Bottom value: diverges when evaluated. Used throughout this file to
-- signal invalid (non-number) Peano inputs instead of raising an error.
bot = bot
-- | Structural equality on Peano numbers after validating both operands
-- with 'istZahl'; invalid input diverges via 'bot'.
peanoEq :: Pint -> Pint -> Bool
peanoEq a b
  | istZahl a && istZahl b = go a b
  | otherwise              = bot
  where
    go Zero     Zero     = True
    go (Succ m) (Succ n) = go m n
    go _        _        = False
-- | Less-than-or-equal on Peano numbers after validating both operands
-- with 'istZahl'; invalid input diverges via 'bot'.
peanoLeq :: Pint -> Pint -> Bool
peanoLeq a b
  | istZahl a && istZahl b = go a b
  | otherwise              = bot
  where
    go Zero     _        = True
    go _        Zero     = False
    go (Succ m) (Succ n) = go m n
-- | Multiplication on Peano numbers, implemented as repeated
-- 'peanoPlus'. Both operands are validated first; invalid input
-- diverges via 'bot'.
peanoMult :: Pint -> Pint -> Pint
peanoMult a b
  | istZahl a && istZahl b = go a b
  | otherwise              = bot
  where
    go Zero     _ = Zero
    go (Succ m) n = peanoPlus n (go m n)
-- Convert between Integer and Pint,
-- e.g. for testing: peanoToInt ( (intToPeano 50) `peanoPlus` (intToPeano 10))
-- | Converts an 'Integer' to a Peano number. 'Pint' carries no sign, so
-- a negative input is converted via its absolute value.
-- The original guard set (i == 0 / i > 0 / i < 0) was complete at
-- runtime but not provably exhaustive to the compiler; the final guard
-- is now 'otherwise', and @abs i - 1@ replaces the equivalent but
-- obscure @(i*(-1))-1@.
intToPeano :: Integer -> Pint
intToPeano i
  | i == 0    = Zero
  | i > 0     = Succ (intToPeano (i - 1))
  | otherwise = Succ (intToPeano (abs i - 1))
-- | Converts a Peano number back to an 'Integer' by counting its
-- 'Succ' constructors with an accumulator.
peanoToInt :: Pint -> Integer
peanoToInt = go 0
  where
    go acc Zero     = acc
    go acc (Succ x) = go (acc + 1) x
-- Converts a PintAll (signed Peano number) to an Integer.
-- (The previous comment had this backwards.) The accumulator counts the
-- Succ constructors of the magnitude; the sign flag then negates it.
peanoAllToInt (neg, pint) = peanoAllToInt_acc pint neg 0
  where
    peanoAllToInt_acc Zero neg n = n * (if neg then (-1) else 1)
    peanoAllToInt_acc (Succ x) neg n = peanoAllToInt_acc x neg (n + 1)
-- Converts an Integer to a PintAll.
-- (The previous comment had the direction backwards.) Uses intToPeano
-- for the magnitude and sets the sign flag appropriately.
-- The i == 0 and i > 0 cases produced the same result (intToPeano 0 is
-- Zero), so they are merged, and the final guard is now 'otherwise' so
-- the guard set is provably exhaustive.
intToPeanoAll :: Integer -> PintAll
intToPeanoAll i
  | i >= 0    = (True, intToPeano i)
  | otherwise = (False, intToPeano (abs i))

-- | A signed Peano number: (sign flag, magnitude).
-- True means non-negative, False means negative.
type PintAll = (Bool,Pint)
| situx/Misc | Haskell/peano.hs | gpl-3.0 | 3,000 | 2 | 18 | 844 | 1,084 | 572 | 512 | 49 | 5 |
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Translate.Types.Sum
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
module Network.Google.Translate.Types.Sum where
import Network.Google.Prelude
-- | The format of the text
data TranslationsListFormat
= HTML
-- ^ @html@
-- Specifies the input is in HTML
| Text
-- ^ @text@
-- Specifies the input is in plain textual format
deriving (Eq, Ord, Enum, Read, Show, Data, Typeable, Generic)
instance Hashable TranslationsListFormat
instance FromHttpApiData TranslationsListFormat where
    -- Maps the wire value of the query parameter back to the enum;
    -- anything else is reported verbatim in the error message.
    parseQueryParam t = case t of
        "html" -> Right HTML
        "text" -> Right Text
        x -> Left ("Unable to parse TranslationsListFormat from: " <> x)
instance ToHttpApiData TranslationsListFormat where
    -- Inverse of 'parseQueryParam': renders the enum as its wire value.
    toQueryParam HTML = "html"
    toQueryParam Text = "text"
instance FromJSON TranslationsListFormat where
parseJSON = parseJSONText "TranslationsListFormat"
instance ToJSON TranslationsListFormat where
toJSON = toJSONText
| rueshyna/gogol | gogol-translate/gen/Network/Google/Translate/Types/Sum.hs | mpl-2.0 | 1,434 | 0 | 11 | 310 | 191 | 110 | 81 | 26 | 0 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.Ml.Projects.Locations.Studies.Trials.Complete
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Marks a trial as complete.
--
-- /See:/ <https://cloud.google.com/ml/ AI Platform Training & Prediction API Reference> for @ml.projects.locations.studies.trials.complete@.
module Network.Google.Resource.Ml.Projects.Locations.Studies.Trials.Complete
(
-- * REST Resource
ProjectsLocationsStudiesTrialsCompleteResource
-- * Creating a Request
, projectsLocationsStudiesTrialsComplete
, ProjectsLocationsStudiesTrialsComplete
-- * Request Lenses
, pXgafv
, pUploadProtocol
, pAccessToken
, pUploadType
, pPayload
, pName
, pCallback
) where
import Network.Google.MachineLearning.Types
import Network.Google.Prelude
-- | A resource alias for @ml.projects.locations.studies.trials.complete@ method which the
-- 'ProjectsLocationsStudiesTrialsComplete' request conforms to.
type ProjectsLocationsStudiesTrialsCompleteResource =
"v1" :>
CaptureMode "name" "complete" Text :>
QueryParam "$.xgafv" Xgafv :>
QueryParam "upload_protocol" Text :>
QueryParam "access_token" Text :>
QueryParam "uploadType" Text :>
QueryParam "callback" Text :>
QueryParam "alt" AltJSON :>
ReqBody '[JSON] GoogleCloudMlV1__CompleteTrialRequest
:> Post '[JSON] GoogleCloudMlV1__Trial
-- | Marks a trial as complete.
--
-- /See:/ 'projectsLocationsStudiesTrialsComplete' smart constructor.
data ProjectsLocationsStudiesTrialsComplete =
  ProjectsLocationsStudiesTrialsComplete'
    { _pXgafv :: !(Maybe Xgafv) -- ^ V1 error format.
    , _pUploadProtocol :: !(Maybe Text) -- ^ Upload protocol for media (e.g. \"raw\", \"multipart\").
    , _pAccessToken :: !(Maybe Text) -- ^ OAuth access token.
    , _pUploadType :: !(Maybe Text) -- ^ Legacy upload protocol for media (e.g. \"media\", \"multipart\").
    , _pPayload :: !GoogleCloudMlV1__CompleteTrialRequest -- ^ Multipart request metadata.
    , _pName :: !Text -- ^ Required. The trial name.
    , _pCallback :: !(Maybe Text) -- ^ JSONP.
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'ProjectsLocationsStudiesTrialsComplete' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'pXgafv'
--
-- * 'pUploadProtocol'
--
-- * 'pAccessToken'
--
-- * 'pUploadType'
--
-- * 'pPayload'
--
-- * 'pName'
--
-- * 'pCallback'
projectsLocationsStudiesTrialsComplete
:: GoogleCloudMlV1__CompleteTrialRequest -- ^ 'pPayload'
-> Text -- ^ 'pName'
-> ProjectsLocationsStudiesTrialsComplete
projectsLocationsStudiesTrialsComplete pPPayload_ pPName_ =
ProjectsLocationsStudiesTrialsComplete'
{ _pXgafv = Nothing
, _pUploadProtocol = Nothing
, _pAccessToken = Nothing
, _pUploadType = Nothing
, _pPayload = pPPayload_
, _pName = pPName_
, _pCallback = Nothing
}
-- | V1 error format.
pXgafv :: Lens' ProjectsLocationsStudiesTrialsComplete (Maybe Xgafv)
pXgafv = lens _pXgafv (\ s a -> s{_pXgafv = a})
-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
pUploadProtocol :: Lens' ProjectsLocationsStudiesTrialsComplete (Maybe Text)
pUploadProtocol
= lens _pUploadProtocol
(\ s a -> s{_pUploadProtocol = a})
-- | OAuth access token.
pAccessToken :: Lens' ProjectsLocationsStudiesTrialsComplete (Maybe Text)
pAccessToken
= lens _pAccessToken (\ s a -> s{_pAccessToken = a})
-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
pUploadType :: Lens' ProjectsLocationsStudiesTrialsComplete (Maybe Text)
pUploadType
= lens _pUploadType (\ s a -> s{_pUploadType = a})
-- | Multipart request metadata.
pPayload :: Lens' ProjectsLocationsStudiesTrialsComplete GoogleCloudMlV1__CompleteTrialRequest
pPayload = lens _pPayload (\ s a -> s{_pPayload = a})
-- | Required. The trial name.
pName :: Lens' ProjectsLocationsStudiesTrialsComplete Text
pName = lens _pName (\ s a -> s{_pName = a})
-- | JSONP
pCallback :: Lens' ProjectsLocationsStudiesTrialsComplete (Maybe Text)
pCallback
= lens _pCallback (\ s a -> s{_pCallback = a})
instance GoogleRequest
ProjectsLocationsStudiesTrialsComplete
where
type Rs ProjectsLocationsStudiesTrialsComplete =
GoogleCloudMlV1__Trial
type Scopes ProjectsLocationsStudiesTrialsComplete =
'["https://www.googleapis.com/auth/cloud-platform"]
requestClient
ProjectsLocationsStudiesTrialsComplete'{..}
= go _pName _pXgafv _pUploadProtocol _pAccessToken
_pUploadType
_pCallback
(Just AltJSON)
_pPayload
machineLearningService
where go
= buildClient
(Proxy ::
Proxy ProjectsLocationsStudiesTrialsCompleteResource)
mempty
| brendanhay/gogol | gogol-ml/gen/Network/Google/Resource/Ml/Projects/Locations/Studies/Trials/Complete.hs | mpl-2.0 | 5,453 | 0 | 16 | 1,187 | 779 | 455 | 324 | 112 | 1 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE TypeOperators #-}
{-# OPTIONS_GHC -fno-warn-duplicate-exports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
-- |
-- Module : Network.Google.Resource.CloudPrivateCatalogProducer.Catalogs.Products.Versions.List
-- Copyright : (c) 2015-2016 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Lists Version resources that the producer has access to, within the
-- scope of the parent Product.
--
-- /See:/ <https://cloud.google.com/private-catalog/ Cloud Private Catalog Producer API Reference> for @cloudprivatecatalogproducer.catalogs.products.versions.list@.
module Network.Google.Resource.CloudPrivateCatalogProducer.Catalogs.Products.Versions.List
(
-- * REST Resource
CatalogsProductsVersionsListResource
-- * Creating a Request
, catalogsProductsVersionsList
, CatalogsProductsVersionsList
-- * Request Lenses
, cpvlParent
, cpvlXgafv
, cpvlUploadProtocol
, cpvlAccessToken
, cpvlUploadType
, cpvlPageToken
, cpvlPageSize
, cpvlCallback
) where
import Network.Google.CloudPrivateCatalogProducer.Types
import Network.Google.Prelude
-- | A resource alias for @cloudprivatecatalogproducer.catalogs.products.versions.list@ method which the
-- 'CatalogsProductsVersionsList' request conforms to.
type CatalogsProductsVersionsListResource =
"v1beta1" :>
Capture "parent" Text :>
"versions" :>
QueryParam "$.xgafv" Xgafv :>
QueryParam "upload_protocol" Text :>
QueryParam "access_token" Text :>
QueryParam "uploadType" Text :>
QueryParam "pageToken" Text :>
QueryParam "pageSize" (Textual Int32) :>
QueryParam "callback" Text :>
QueryParam "alt" AltJSON :>
Get '[JSON]
GoogleCloudPrivatecatalogproducerV1beta1ListVersionsResponse
-- | Lists Version resources that the producer has access to, within the
-- scope of the parent Product.
--
-- /See:/ 'catalogsProductsVersionsList' smart constructor.
data CatalogsProductsVersionsList =
  CatalogsProductsVersionsList'
    { _cpvlParent :: !Text -- ^ The resource name of the parent resource.
    , _cpvlXgafv :: !(Maybe Xgafv) -- ^ V1 error format.
    , _cpvlUploadProtocol :: !(Maybe Text) -- ^ Upload protocol for media (e.g. \"raw\", \"multipart\").
    , _cpvlAccessToken :: !(Maybe Text) -- ^ OAuth access token.
    , _cpvlUploadType :: !(Maybe Text) -- ^ Legacy upload protocol for media (e.g. \"media\", \"multipart\").
    , _cpvlPageToken :: !(Maybe Text) -- ^ Pagination token from a previous ListVersions call.
    , _cpvlPageSize :: !(Maybe (Textual Int32)) -- ^ The maximum number of versions to return.
    , _cpvlCallback :: !(Maybe Text) -- ^ JSONP.
    }
  deriving (Eq, Show, Data, Typeable, Generic)
-- | Creates a value of 'CatalogsProductsVersionsList' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'cpvlParent'
--
-- * 'cpvlXgafv'
--
-- * 'cpvlUploadProtocol'
--
-- * 'cpvlAccessToken'
--
-- * 'cpvlUploadType'
--
-- * 'cpvlPageToken'
--
-- * 'cpvlPageSize'
--
-- * 'cpvlCallback'
catalogsProductsVersionsList
:: Text -- ^ 'cpvlParent'
-> CatalogsProductsVersionsList
catalogsProductsVersionsList pCpvlParent_ =
CatalogsProductsVersionsList'
{ _cpvlParent = pCpvlParent_
, _cpvlXgafv = Nothing
, _cpvlUploadProtocol = Nothing
, _cpvlAccessToken = Nothing
, _cpvlUploadType = Nothing
, _cpvlPageToken = Nothing
, _cpvlPageSize = Nothing
, _cpvlCallback = Nothing
}
-- | The resource name of the parent resource.
cpvlParent :: Lens' CatalogsProductsVersionsList Text
cpvlParent
= lens _cpvlParent (\ s a -> s{_cpvlParent = a})
-- | V1 error format.
cpvlXgafv :: Lens' CatalogsProductsVersionsList (Maybe Xgafv)
cpvlXgafv
= lens _cpvlXgafv (\ s a -> s{_cpvlXgafv = a})
-- | Upload protocol for media (e.g. \"raw\", \"multipart\").
cpvlUploadProtocol :: Lens' CatalogsProductsVersionsList (Maybe Text)
cpvlUploadProtocol
= lens _cpvlUploadProtocol
(\ s a -> s{_cpvlUploadProtocol = a})
-- | OAuth access token.
cpvlAccessToken :: Lens' CatalogsProductsVersionsList (Maybe Text)
cpvlAccessToken
= lens _cpvlAccessToken
(\ s a -> s{_cpvlAccessToken = a})
-- | Legacy upload protocol for media (e.g. \"media\", \"multipart\").
cpvlUploadType :: Lens' CatalogsProductsVersionsList (Maybe Text)
cpvlUploadType
= lens _cpvlUploadType
(\ s a -> s{_cpvlUploadType = a})
-- | A pagination token returned from a previous call to ListVersions that
-- indicates where this listing should continue from. This field is
-- optional.
cpvlPageToken :: Lens' CatalogsProductsVersionsList (Maybe Text)
cpvlPageToken
= lens _cpvlPageToken
(\ s a -> s{_cpvlPageToken = a})
-- | The maximum number of versions to return.
cpvlPageSize :: Lens' CatalogsProductsVersionsList (Maybe Int32)
cpvlPageSize
= lens _cpvlPageSize (\ s a -> s{_cpvlPageSize = a})
. mapping _Coerce
-- | JSONP
cpvlCallback :: Lens' CatalogsProductsVersionsList (Maybe Text)
cpvlCallback
= lens _cpvlCallback (\ s a -> s{_cpvlCallback = a})
instance GoogleRequest CatalogsProductsVersionsList
where
type Rs CatalogsProductsVersionsList =
GoogleCloudPrivatecatalogproducerV1beta1ListVersionsResponse
type Scopes CatalogsProductsVersionsList =
'["https://www.googleapis.com/auth/cloud-platform"]
requestClient CatalogsProductsVersionsList'{..}
= go _cpvlParent _cpvlXgafv _cpvlUploadProtocol
_cpvlAccessToken
_cpvlUploadType
_cpvlPageToken
_cpvlPageSize
_cpvlCallback
(Just AltJSON)
cloudPrivateCatalogProducerService
where go
= buildClient
(Proxy :: Proxy CatalogsProductsVersionsListResource)
mempty
| brendanhay/gogol | gogol-cloudprivatecatalogproducer/gen/Network/Google/Resource/CloudPrivateCatalogProducer/Catalogs/Products/Versions/List.hs | mpl-2.0 | 6,210 | 0 | 18 | 1,386 | 883 | 512 | 371 | 128 | 1 |
{-# LANGUAGE BangPatterns
, MultiParamTypeClasses #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
-- 'Convertible' instances for conversions between pixel types.
module Vision.Image.Conversion (Convertible (..), convert) where
import Data.Convertible (Convertible (..), ConvertResult, convert)
import Data.Word
import qualified Data.Vector.Storable as VS
import Vision.Image.Grey.Type (GreyPixel (..))
import Vision.Image.HSV.Type (HSVPixel (..))
import Vision.Image.RGBA.Type (RGBAPixel (..))
import Vision.Image.RGB.Type (RGBPixel (..))
-- to Grey ---------------------------------------------------------------------
-- | Identity conversion.
instance Convertible GreyPixel GreyPixel where
    safeConvert = Right
    {-# INLINE safeConvert #-}
-- | Goes through RGB, reusing the RGB -> Grey conversion below.
instance Convertible HSVPixel GreyPixel where
    safeConvert pix = (safeConvert pix :: ConvertResult RGBPixel)
                        >>= safeConvert
-- | Converts to grey and scales the result by the alpha channel
-- (a == 0 maps to black, a == 255 keeps the grey level unchanged).
instance Convertible RGBAPixel GreyPixel where
    safeConvert !(RGBAPixel r g b a) =
        Right $ GreyPixel $ word8 $ int (rgbToGrey r g b) * int a `quot` 255
    {-# INLINE safeConvert #-}
instance Convertible RGBPixel GreyPixel where
    safeConvert !(RGBPixel r g b) =
        Right $ GreyPixel $ rgbToGrey r g b
    {-# INLINE safeConvert #-}
-- | Converts the colors to greyscale using the human eye colors
-- perception (weights 0.299 R + 0.587 G + 0.114 B, cf. the lookup
-- tables below). Each channel's weighted share is precomputed, so the
-- per-pixel cost is three indexed reads and two additions.
rgbToGrey :: Word8 -> Word8 -> Word8 -> Word8
rgbToGrey !r !g !b =
      (redLookupTable   VS.! int r)
    + (greenLookupTable VS.! int g)
    + (blueLookupTable  VS.! int b)
{-# INLINE rgbToGrey #-}
-- Per-channel weight tables used by 'rgbToGrey'. Entry i of a table is
-- @round (i * weight)@ for i in [0; 255].
redLookupTable, greenLookupTable, blueLookupTable :: VS.Vector Word8
redLookupTable   = weightedChannelTable 0.299
greenLookupTable = weightedChannelTable 0.587
blueLookupTable  = weightedChannelTable 0.114

-- Builds a 256-entry table mapping a channel value to its weighted share.
weightedChannelTable :: Double -> VS.Vector Word8
weightedChannelTable w = VS.generate 256 (\val -> round (double val * w))
-- to HSV ----------------------------------------------------------------------
instance Convertible HSVPixel HSVPixel where
safeConvert = Right
{-# INLINE safeConvert #-}
instance Convertible GreyPixel HSVPixel where
safeConvert pix = (safeConvert pix :: ConvertResult RGBPixel)
>>= safeConvert
instance Convertible RGBPixel HSVPixel where
-- Based on :
-- http://en.wikipedia.org/wiki/HSL_and_HSV#General_approach
safeConvert !(RGBPixel r g b) =
Right pix
where
(!r', !g', !b') = (int r, int g, int b)
!pix | r >= g && r >= b = -- r == max r g b
let !c = r' - min b' g'
!h = fixHue $ hue c b' g' -- Hue can be negative
in HSVPixel (word8 h) (sat c r') r
| g >= r && g >= b = -- g == max r g b
let !c = g' - min r' b'
!h = 60 + hue c r' b'
in HSVPixel (word8 h) (sat c g') g
| otherwise = -- b == max r g b
let !c = b' - min r' g'
!h = 120 + hue c g' r'
in HSVPixel (word8 h) (sat c b') b
-- Returns a value in [-30; +30].
hue 0 _ _ = 0
hue !c !left !right = (30 * (right - left)) `quot` c
sat _ 0 = 0
sat !c v = word8 $ (c * 255) `quot` v
-- Keeps the value of the hue between [0, 179].
-- As the Hue's unit is 2°, 180 is equal to 360° and to 0°.
fixHue !h | h < 0 = h + 180
| otherwise = h
instance Convertible RGBAPixel HSVPixel where
safeConvert pix = (safeConvert pix :: ConvertResult RGBPixel)
>>= safeConvert
-- to RGB ----------------------------------------------------------------------
instance Convertible RGBPixel RGBPixel where
safeConvert = Right
{-# INLINE safeConvert #-}
instance Convertible GreyPixel RGBPixel where
safeConvert !(GreyPixel pix) = Right $ RGBPixel pix pix pix
{-# INLINE safeConvert #-}
instance Convertible RGBAPixel RGBPixel where
safeConvert !(RGBAPixel r g b a) =
Right $ RGBPixel (withAlpha r) (withAlpha g) (withAlpha b)
where
!a' = int a
withAlpha !val = word8 $ int val * a' `quot` 255
{-# INLINE withAlpha #-}
{-# INLINE safeConvert #-}
instance Convertible HSVPixel RGBPixel where
    -- Based on :
    -- http://en.wikipedia.org/wiki/HSL_and_HSV#Converting_to_RGB
    -- The hue is stored in [0; 180[ with a unit of 2 degrees (see the
    -- RGB -> HSV instance above), so @h `quot` 30@ selects one of the
    -- six 60-degree sectors of the color wheel.
    safeConvert !(HSVPixel h s v) =
        Right $! case h `quot` 30 of
            0 -> RGBPixel v (word8 x1') (word8 m)
            1 -> RGBPixel (word8 (x2 60)) v (word8 m)
            2 -> RGBPixel (word8 m) v (word8 (x1 60))
            3 -> RGBPixel (word8 m) (word8 (x2 120)) v
            4 -> RGBPixel (word8 (x1 120)) (word8 m) v
            5 -> RGBPixel v (word8 m) (word8 (x2 180))
            _ -> error "Invalid hue value."
      where
        (!h', v') = (int h, int v)
        -- v is the major color component whereas m is the minor one.
        !m = (v' * (255 - int s)) `quot` 255
        -- Computes the remaining component by resolving the hue equation,
        -- knowing v and m. x1 is when the component is on the right of the
        -- major one, x2 when on the left. The argument d is the sector's
        -- base hue (in the same 2-degree unit as h').
        x1 d = (d * m - d * v' + h' * v' - h' * m + 30 * m) `quot` 30
        x1' = ( h' * v' - h' * m + 30 * m) `quot` 30 -- == x1 0
        x2 d = (d * v' - d * m + h' * m - h' * v' + 30 * m) `quot` 30
    {-# INLINE safeConvert #-}
-- to RGBA ---------------------------------------------------------------------
instance Convertible RGBAPixel RGBAPixel where
safeConvert = Right
{-# INLINE safeConvert #-}
instance Convertible GreyPixel RGBAPixel where
safeConvert !(GreyPixel pix) = Right $ RGBAPixel pix pix pix 255
{-# INLINE safeConvert #-}
instance Convertible HSVPixel RGBAPixel where
safeConvert pix = (safeConvert pix :: ConvertResult RGBPixel)
>>= safeConvert
instance Convertible RGBPixel RGBAPixel where
safeConvert !(RGBPixel r g b) = Right $ RGBAPixel r g b 255
{-# INLINE safeConvert #-}
-- -----------------------------------------------------------------------------
-- Shorthand integral conversions used throughout this module.
-- | Widens an integral value to 'Double' (used for the weight tables).
double :: Integral a => a -> Double
double = fromIntegral
-- | Converts an integral value to 'Int' for intermediate arithmetic.
int :: Integral a => a -> Int
int = fromIntegral
-- | Converts an integral value to 'Word8'; wraps modulo 256, so callers
-- must keep results in [0; 255].
word8 :: Integral a => a -> Word8
word8 = fromIntegral
| RaphaelJ/friday | src/Vision/Image/Conversion.hs | lgpl-3.0 | 6,333 | 0 | 18 | 1,876 | 1,778 | 918 | 860 | 112 | 1 |
module Chess.A278211Spec (main, spec) where
import Test.Hspec
import Chess.A278211 (a278211)
main :: IO ()
main = hspec spec
-- | Checks the initial terms of the sequence against known values.
spec :: Spec
spec = describe "A278211" $
  it "correctly computes the first 3 elements" $
    map a278211 [1..3] `shouldBe` [0, 4, 12]
| peterokagey/haskellOEIS | test/Chess/A278211Spec.hs | apache-2.0 | 306 | 0 | 8 | 61 | 103 | 58 | 45 | 10 | 1 |
{-# LANGUAGE PatternGuards #-}
{-# LANGUAGE TupleSections #-}
{-# LANGUAGE TypeOperators #-}
-- | K3 Program constructor
module Language.K3.Parser.ProgramBuilder (
defaultIncludes,
defaultRoleName,
processInitsAndRoles,
endpointMethods,
bindSource,
mkRunSourceE,
mkRunSinkE,
declareBuiltins,
resolveFn
) where
import Data.Char ( isPunctuation )
import Data.Hashable
import Data.List
import Data.Tree
import Debug.Trace
import Language.K3.Core.Annotation
import Language.K3.Core.Common
import Language.K3.Core.Declaration
import Language.K3.Core.Expression
import Language.K3.Core.Type
import Language.K3.Core.Utils
import qualified Language.K3.Core.Constructor.Type as TC
import qualified Language.K3.Core.Constructor.Expression as EC
import qualified Language.K3.Core.Constructor.Declaration as DC
import Language.K3.Utils.Pretty
-- | Type synonyms, copied from the parser.
type EndpointInfo = (EndpointSpec, Maybe [Identifier], Identifier, Maybe (K3 Expression))
{- Default includes required by the program builder. -}
-- | Include directives required by every generated K3 program. Each
-- annotation path is rendered quoted (via 'show') after the @include@
-- keyword.
defaultIncludes :: [String]
defaultIncludes =
    [ "include " ++ show path
    | path <- [ "Annotation/Collection.k3"
              , "Annotation/Set.k3"
              , "Annotation/Map.k3"
              , "Annotation/Maps/SortedMap.k3" ]
    ]
{- Names -}
-- Naming-convention helpers: each endpoint @n@ gets a family of derived
-- declarations (readers, writers, controllers, bookkeeping globals).
-- The functions below build those identifiers by suffixing the
-- endpoint's name.
defaultRoleName :: Identifier
defaultRoleName = "__global"
myId :: Identifier
myId = "me"
peersId :: Identifier
peersId = "peers"
myAddr :: K3 Expression
myAddr = EC.variable myId
chrName :: Identifier -> Identifier
chrName n = n++"HasRead"
crName :: Identifier -> Identifier
crName n = n++"Read"
cmhrName :: Identifier -> Identifier
cmhrName n = n++"MuxHasRead"
cmrName :: Identifier -> Identifier
cmrName n = n++"MuxRead"
cpohrName :: Identifier -> Identifier
cpohrName n = n++"POrdHasRead"
cporName :: Identifier -> Identifier
cporName n = n++"POrdRead"
cpdhrName :: Identifier -> Identifier
cpdhrName n = n++"PDataHasRead"
cpdrName :: Identifier -> Identifier
cpdrName n = n++"PDataRead"
chwName :: Identifier -> Identifier
chwName n = n++"HasWrite"
cwName :: Identifier -> Identifier
cwName n = n++"Write"
ciName :: Identifier -> Identifier
ciName n = n++"Init"
csName :: Identifier -> Identifier
csName n = n++"Start"
cpName :: Identifier -> Identifier
cpName n = n++"Process"
cfName :: Identifier -> Identifier
cfName n = n++"Feed"
ccName :: Identifier -> Identifier
ccName n = n++"Controller"
cmcName :: Identifier -> Identifier
cmcName n = n++"MuxCounter"
cfiName :: Identifier -> Identifier
cfiName n = n++"FileIndex"
cfpName :: Identifier -> Identifier
cfpName n = n++"FilePath"
cfmpName :: Identifier -> Identifier
cfmpName n = n++"FilePositions"
cfmbName :: Identifier -> Identifier
cfmbName n = n++"MuxBuffer"
cfmdName :: Identifier -> Identifier
cfmdName n = n++"DefaultElem"
cfpcompleteName :: Identifier -> Identifier
cfpcompleteName n = n++"FilesComplete"
{- Runtime functions -}
-- Expression references to built-in runtime functions of the generated
-- K3 program.
openBuiltinFn :: K3 Expression
openBuiltinFn = EC.variable "openBuiltin"
openFileFn :: K3 Expression
openFileFn = EC.variable "openFile"
openSocketFn :: K3 Expression
openSocketFn = EC.variable "openSocket"
closeFn :: K3 Expression
closeFn = EC.variable "close"
resolveFn :: K3 Expression
resolveFn = EC.variable "resolve"
registerSocketDataTriggerFn :: K3 Expression
registerSocketDataTriggerFn = EC.variable "registerSocketDataTrigger"
{- Top-level functions -}
-- Identifiers and expressions used by the generated 'processRole'
-- dispatch driver (see 'processInitsAndRoles').
roleId :: Identifier
roleId = "role"
roleVar :: K3 Expression
roleVar = EC.variable roleId
roleElemId :: Identifier
roleElemId = "rolerec"
roleElemVar :: K3 Expression
roleElemVar = EC.variable roleElemId
roleElemLbl :: Identifier
roleElemLbl = "i"
roleFnId :: Identifier
roleFnId = "processRole"
{- Declaration construction -}
-- Builders that tag generated declarations with a synthetic source span
-- so they are distinguishable from user-written code.
builtinGlobal :: Identifier -> K3 Type -> Maybe (K3 Expression) -> K3 Declaration
builtinGlobal n t eOpt = (DC.global n t eOpt) @+ (DSpan $ GeneratedSpan $ fromIntegral $ hash "builtin")
builtinTrigger :: Identifier -> K3 Type -> K3 Expression -> K3 Declaration
builtinTrigger n t e = (DC.trigger n t e) @+ (DSpan $ GeneratedSpan $ fromIntegral $ hash "builtin")
{- Type qualification -}
-- | Marks a type immutable unless it already carries a qualifier
-- annotation. Replaces the @null . filter@ anti-idiom with 'any',
-- which short-circuits and allocates no intermediate list.
qualifyT :: K3 Type -> K3 Type
qualifyT t = if any isTQualified (annotations t) then t else t @+ TImmutable
-- | Marks an expression immutable unless it already carries a
-- qualifier annotation (mirror of 'qualifyT').
qualifyE :: K3 Expression -> K3 Expression
qualifyE e = if any isEQualified (annotations e) then e else e @+ EImmutable
{- Desugaring methods -}
-- TODO: replace with Template Haskell
-- | Appends a generated 'processRole' driver to the program's
-- declarations. The driver first runs every sink's initialization
-- expression, then iterates over the 'role' collection and dispatches
-- each entry to the source whose role name matches.
processInitsAndRoles :: K3 Declaration -> [(Identifier, EndpointInfo)] -> K3 Declaration
processInitsAndRoles (Node t c) endpointBQGs = Node t $ c ++ initializerFns
  where
    -- Sinks are the endpoints without a binding-identifier list.
    (sinkEndpoints, sourceEndpoints) = partition matchSink endpointBQGs
    matchSink (_,(_, Nothing, _, _)) = True
    matchSink _ = False
    initializerFns =
      [ builtinGlobal roleFnId (qualifyT unitFnT)
          $ Just . qualifyE $ mkRoleBody sourceEndpoints sinkEndpoints ]
    -- Accumulates each sink's initialization expression, if present.
    sinkInitE acc (_,(_, Nothing, _, Just e)) = acc ++ [e]
    sinkInitE acc _ = acc
    -- TODO handle empty sinks or sources
    -- Body of processRole: sink initializers, then role dispatch.
    mkRoleBody sources sinks =
      EC.lambda "_" $ EC.block $
        (foldl sinkInitE [] sinks) ++
        [EC.applyMany (EC.project "iterate" roleVar)
          [EC.lambda roleElemId $ foldl dispatchId EC.unit sources]]
    -- Folds sources into a chain of if-then-else tests on the role name,
    -- falling through to elseE when no source matches.
    dispatchId elseE (n,(_,_,y,goE)) = EC.ifThenElse (eqRole y) (runE n goE) elseE
    eqRole n = EC.binop OEqu (EC.project roleElemLbl roleElemVar) (EC.constant $ CString n)
    -- Runs the endpoint's explicit start expression when given, else
    -- invokes its generated Process function.
    runE _ (Just goE) = goE
    runE n Nothing = EC.applyMany (EC.variable $ cpName n) [EC.unit]
    unitFnT = TC.function TC.unit TC.unit
{- Code generation methods-}
-- TODO: replace with Template Haskell
endpointMethods :: Bool -> EndpointSpec -> K3 Expression -> K3 Expression
-> Identifier -> K3 Type
-> (EndpointSpec, Maybe (K3 Expression), [K3 Declaration])
endpointMethods isSource eSpec argE formatE n t =
if isSource then sourceDecls else sinkDecls
where
sourceDecls = (eSpec, Nothing,) $
sourceExtraDecls
++ (map mkMethod $ [mkInit, mkStart, mkFinal] ++ sourceReadDecls)
++ [sourceController]
sinkDecls = (eSpec, Just sinkImpl, map mkMethod [mkInit, mkFinal, sinkHasWrite, sinkWrite])
-- Endpoint-specific declarations
sourceReadDecls = case eSpec of
FileMuxEP _ _ _ -> [sourceMuxHasRead, sourceMuxRead]
FileMuxseqEP _ _ _ -> [sourceMuxHasRead, sourceMuxRead]
PolyFileMuxEP _ _ _ _ _ -> [sourcePOrdHasRead, sourcePOrdRead, sourcePolyHasRead, sourcePolyRead]
PolyFileMuxSeqEP _ _ _ _ _ -> [sourcePOrdHasRead, sourcePOrdRead, sourcePolyHasRead, sourcePolyRead]
_ -> [sourceHasRead, sourceRead]
sourceExtraDecls = case eSpec of
FileSeqEP _ _ _ -> [ builtinGlobal (cfiName n) (TC.int @+ TMutable) Nothing
, builtinGlobal (cfpName n) (TC.string @+ TMutable) Nothing ]
FileMuxEP _ _ _ -> [ builtinGlobal (cfmpName n) muxPosMap Nothing
, builtinGlobal (cfmbName n) muxBuffer Nothing
, builtinGlobal (cfmdName n) t Nothing
, builtinGlobal (cmcName n) (TC.int @+ TMutable) (Just $ EC.constant $ CInt 0) ]
FileMuxseqEP _ _ _ -> [ builtinGlobal (cfmpName n) muxPosMap Nothing
, builtinGlobal (cfmbName n) muxBuffer Nothing
, builtinGlobal (cfmdName n) t Nothing
, builtinGlobal (cfiName n) muxSeqIdxMap Nothing
, builtinGlobal (cfpName n) muxSeqPathMap Nothing
, builtinGlobal (cmcName n) (TC.int @+ TMutable) (Just $ EC.constant $ CInt 0) ]
PolyFileMuxEP _ _ _ _ _ -> [ builtinGlobal (cfpcompleteName n) pmuxDoneMap Nothing
, builtinGlobal (cmcName n) (TC.int @+ TMutable) (Just $ EC.constant $ CInt 0) ]
PolyFileMuxSeqEP _ _ _ _ _ -> [ builtinGlobal (cfpcompleteName n) pmuxDoneMap Nothing
, builtinGlobal (cfiName n) muxSeqIdxMap Nothing
, builtinGlobal (cfpName n) muxSeqPathMap Nothing
, builtinGlobal (cmcName n) (TC.int @+ TMutable) (Just $ EC.constant $ CInt 0) ]
_ -> []
mkMethod (m, argT, retT, eOpt) =
builtinGlobal (n++m) (qualifyT $ TC.function argT retT)
$ maybe Nothing (Just . qualifyE) eOpt
mkInit = ("Init", TC.unit, TC.unit, Just $ EC.lambda "_" $ initE)
mkStart = ("Start", TC.unit, TC.unit, Just $ EC.lambda "_" $ startE)
mkFinal = ("Final", TC.unit, TC.unit, Just $ EC.lambda "_" $ closeE)
mkCollection fields ctype = (TC.collection $ TC.record $ map (qualifyT <$>) fields) @+ TAnnotation ctype
muxSeqLabel = "order"
muxDataLabel = "value"
muxPosMap = mkCollection [("key", TC.int), ("value", TC.int)] "SortedMap"
muxBuffer = mkCollection [("key", TC.int), ("value", t)] "Map"
muxFullT = TC.record [("order", TC.int), ("value", t)]
muxSeqIdxMap = mkCollection [("key", TC.int), ("value", TC.int)] "Map"
muxSeqPathMap = mkCollection [("key", TC.int), ("value", TC.string)] "Map"
pmuxDoneMap = mkCollection [("key", TC.int), ("value", TC.bool)] "Map"
sourceController = case eSpec of
FileSeqEP _ txt _ -> seqSrcController txt
FileMuxEP _ _ _ -> muxSrcController
FileMuxseqEP _ txt _ -> muxSeqSrcController txt
PolyFileMuxEP _ _ _ _ sv -> pmuxSrcController sv
PolyFileMuxSeqEP _ txt _ _ sv -> pmuxSeqSrcController txt sv
_ -> singleSrcController
sinkImpl =
EC.lambda "__msg"
(EC.ifThenElse
(EC.applyMany (EC.variable $ chwName n) [EC.unit])
(EC.applyMany (EC.variable $ cwName n) [EC.variable "__msg"])
(EC.unit))
singleSrcController = builtinTrigger (ccName n) TC.unit $
EC.lambda "_" $
EC.ifThenElse
(EC.applyMany (EC.variable $ chrName n) [EC.unit])
(controlE $ EC.applyMany (EC.variable $ cpName n) [EC.unit])
EC.unit
{-----------------------------------------
- File sequence controler and utilities.
-----------------------------------------}
seqSrcController txt = builtinTrigger (ccName n) TC.unit $
EC.lambda "_" $
EC.ifThenElse
(EC.applyMany (EC.variable $ chrName n) [EC.unit])
(controlE $ EC.applyMany (EC.variable $ cpName n) [EC.unit])
$ EC.block
[ nextFileIndexE
, EC.ifThenElse notLastFileIndexE
(EC.block [openSeqNextFileE False True openFileFn txt, controlRcrE])
EC.unit ]
nextFileIndexE =
EC.assign (cfiName n) $ EC.binop OAdd (EC.variable $ cfiName n) (EC.constant $ CInt 1)
notLastFileIndexE =
EC.binop OLth (EC.variable $ cfiName n) (EC.applyMany (EC.project "size" argE) [EC.unit])
openSeqNextFileE withTest withClose openFn txt = openSeqWithTest withTest $
EC.block $
[ assignSeqPathE ]
++ (if withClose then [closeE] else [])
++ [ EC.applyMany openFn [EC.variable "me", sourceId n, EC.variable (cfpName n), formatE, EC.constant $ CBool txt, modeE] ]
assignSeqPathE = EC.assign (cfpName n) $ EC.project "path" $ EC.applyMany (EC.project "at" argE) [EC.variable $ cfiName n]
openSeqWithTest withTest openE =
if withTest then EC.ifThenElse notLastFileIndexE openE EC.unit else openE
{---------------------------------------------
- File multiplexer controller and utilities.
---------------------------------------------}
muxSrcControllerTrig onFileDoneE = builtinTrigger (ccName n) TC.unit $
EC.lambda "_" $
EC.ifThenElse
(EC.binop OLth (EC.constant $ CInt 0) $
EC.applyMany (EC.project "size" $ EC.variable $ cfmpName n) [EC.unit])
(controlE $ EC.applyMany (EC.project "min" $ EC.variable $ cfmpName n)
[EC.lambda "_" EC.unit, EC.lambda muxid $ doMuxNext onFileDoneE])
EC.unit
muxSrcController = muxSrcControllerTrig muxFinishChan
muxSeqSrcController txt = muxSrcControllerTrig $ muxSeqNextChan openFileFn txt
muxid = "muxnext"
muxvar = EC.variable muxid
muxidx = EC.project "value" muxvar
muxChanIdE e = EC.binop OConcat (EC.constant $ CString $ n ++ "_")
(EC.applyMany (EC.variable "itos") [e])
globalMuxChanIdE = muxChanIdE $ EC.variable $ cmcName n
cntrlrMuxChanIdE = muxChanIdE muxidx
doMuxNext onFileDoneE =
EC.block [ muxNextFromChan, muxSafeRefreshChan True onFileDoneE muxidx ]
muxNextFromChan =
EC.applyMany (EC.project "lookup" $ EC.variable $ cfmbName n)
[ EC.record [("key", muxidx), ("value", EC.variable $ cfmdName n)]
, ignoreE
, EC.lambda "x" $ EC.applyMany (EC.variable $ cfName n) [EC.project "value" $ EC.variable "x"] ]
muxSafeRefreshChan withErase onFileDoneE muxIdE =
EC.ifThenElse (EC.applyMany (EC.variable $ cmhrName n) [muxIdE])
(muxRefreshChan withErase muxIdE)
onFileDoneE
muxRefreshChan withErase muxIdE =
EC.applyMany
(EC.lambda "next" $ EC.block $
[ EC.applyMany (EC.project "insert" $ EC.variable $ cfmbName n)
[EC.record [("key", muxIdE), ("value", EC.project muxDataLabel $ EC.variable "next")]]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfmpName n)
[EC.record [("key", EC.project muxSeqLabel $ EC.variable "next"), ("value", muxIdE)]]
] ++ (if withErase
then [EC.applyMany (EC.project "erase" $ EC.variable $ cfmpName n) [muxvar]]
else []))
[EC.applyMany (EC.variable $ cmrName n) [muxIdE]]
muxFinishChan = EC.block
[ EC.applyMany (EC.project "erase" $ EC.variable $ cfmbName n)
[EC.record [("key", muxidx), ("value", EC.variable $ cfmdName n)]]
, EC.applyMany (EC.project "erase" $ EC.variable $ cfmpName n) [muxvar]]
muxSeqNextChan openFn txt =
EC.applyMany (EC.project "safe_at" $ argE)
[ muxidx
, ignoreE
, EC.lambda "seqc" $
EC.applyMany (EC.project "lookup" $ EC.variable $ cfiName n)
[ muxSeqIdx $ EC.constant $ CInt 0
, ignoreE
, EC.lambda "seqidx" $
EC.ifThenElse (muxSeqNotLastFileIndexE "seqc" "seqidx")
(EC.block [muxSeqNextFileE openFn "seqc" "seqidx" txt])
(EC.block [muxFinishChan, muxSeqFinishChan muxSeqIdx]) ]]
muxSeqNextFileE openFn seqvar idxvar txt =
EC.letIn "nextidx"
(EC.binop OAdd (EC.project "value" $ EC.variable idxvar) $ EC.constant $ CInt 1)
(EC.applyMany (EC.project "safe_at" $ EC.project "seq" $ EC.variable seqvar)
[ EC.variable "nextidx"
, ignoreE
, EC.lambda "f" $ EC.block
[ EC.applyMany closeFn [EC.variable "me", cntrlrMuxChanIdE]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfiName n) [muxSeqIdx $ EC.variable "nextidx"]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfpName n) [muxSeqIdx $ EC.project "path" $ EC.variable "f"]
, EC.applyMany openFn [EC.variable "me", cntrlrMuxChanIdE, EC.project "path" $ EC.variable "f", formatE, EC.constant $ CBool txt, modeE]
, muxSafeRefreshChan True EC.unit muxidx]])
muxSeqNotLastFileIndexE seqvar idxvar =
EC.binop OLth (EC.project "value" $ EC.variable idxvar) $
EC.binop OSub (EC.applyMany (EC.project "size" $ EC.project "seq" $ EC.variable seqvar) [EC.unit])
(EC.constant $ CInt 1)
muxSeqFinishChan muxSeqFn = EC.block
[ EC.applyMany (EC.project "erase" $ EC.variable $ cfiName n) [muxSeqFn $ EC.constant $ CInt 0]
, EC.applyMany (EC.project "erase" $ EC.variable $ cfpName n) [muxSeqFn $ EC.constant $ CString ""]]
muxSeqIdx e = EC.record [("key", muxidx), ("value", e)]
{-------------------------------------------------
- Poly-File multiplexer controller and utilities.
-------------------------------------------------}
pmuxidx = "pmuxidx"
pmuxvar = EC.variable pmuxidx
pmuxnext = "pmuxnext"
pmuxnextvar = EC.variable pmuxnext
pmuxOrderChanIdE = EC.constant $ CString $ n ++ "_order"
cntrlrPMuxChanIdE = muxChanIdE pmuxvar
pmuxSrcControllerTrig onFileDoneE rbsizeV = builtinTrigger (ccName n) TC.unit $
EC.lambda "_" $
EC.ifThenElse
(EC.binop OGth
(EC.applyMany (EC.project "size" argE) [EC.unit]) $
EC.applyMany (EC.project "size" $ EC.variable $ cfpcompleteName n) [EC.unit])
(EC.ifThenElse
(EC.applyMany (EC.variable $ cpohrName n) [EC.unit])
(controlE $ EC.applyMany
(EC.lambda pmuxidx $ pmuxNextOrderE onFileDoneE rbsizeV)
[EC.applyMany (EC.variable $ cporName n) [EC.unit]])
EC.unit)
EC.unit
{- Polyfile controllers. -}
pmuxSrcController rbsizeV = pmuxSrcControllerTrig pmuxFinishChan rbsizeV
pmuxSeqSrcController txt rbsizeV =
pmuxSrcControllerTrig (pmuxSeqNextChan openFileFn txt rbsizeV) rbsizeV
{- Polyfile controller codegen. -}
pmuxNextOrderE onFileDoneE rbsizeV =
EC.ifThenElse (EC.binop OOr
(EC.binop OGeq pmuxvar $ EC.applyMany (EC.project "size" argE) [EC.unit])
$ (EC.applyMany (EC.project "member" $ EC.variable $ cfpcompleteName n)
[EC.record [("key", pmuxvar), ("value", EC.constant $ CBool True)]]))
EC.unit
(pmuxSafeNextChan onFileDoneE rbsizeV)
pmuxSafeNextChan onFileDoneE rbsizeV =
EC.ifThenElse (EC.applyMany (EC.variable $ cpdhrName n) [pmuxvar])
(pmuxNextChan rbsizeV)
onFileDoneE
pmuxNextChan rbsizeV =
EC.applyMany
(EC.lambda pmuxnext $
EC.letIn "buffer" defaultBuffer $
EC.block
[ EC.applyMany (EC.project "load" $ EC.variable "buffer") [pmuxnextvar]
, EC.ifThenElse
(EC.binop OEqu (EC.variable rbsizeV) $ EC.constant $ CInt 0)
noRebufferE
rebufferE])
[EC.applyMany (EC.variable $ cpdrName n) [pmuxvar]]
where
feedBufferE bufE = EC.applyMany (EC.variable $ cfName n) [bufE]
noRebufferE = feedBufferE $ EC.variable "buffer"
rebufferE =
EC.applyMany (EC.project "iterate" $ EC.applyMany (EC.project "splitMany" $ EC.variable "buffer")
[EC.variable rbsizeV])
[EC.lambda "rebuf" $ EC.block
[ EC.applyMany (EC.project "unpack" $ EC.project "elem" $ EC.variable "rebuf") [EC.unit]
, feedBufferE $ EC.project "elem" $ EC.variable "rebuf"]]
defaultBuffer = either error debugDefault $ defaultExpression cleanT
debugDefault dt = if True then dt else trace (boxToString $
["Default buffer expr: "] ++ prettyLines dt ++
["CleanT: "] ++ prettyLines cleanT) dt
pmuxFinishChan =
EC.applyMany (EC.project "insert" $ EC.variable $ cfpcompleteName n)
[EC.record [("key", pmuxvar), ("value", EC.constant $ CBool True)]]
pmuxSeqNextChan openFn txt rbsizeV =
EC.applyMany (EC.project "safe_at" $ argE)
[ pmuxvar
, ignoreE
, EC.lambda "seqc" $
EC.applyMany (EC.project "lookup" $ EC.variable $ cfiName n)
[ pmuxSeqIdx $ EC.constant $ CInt 0
, ignoreE
, EC.lambda "seqidx" $
EC.ifThenElse (muxSeqNotLastFileIndexE "seqc" "seqidx")
(EC.block [pmuxSeqNextFileE openFn txt rbsizeV "seqc" "seqidx"])
(EC.block [pmuxFinishChan, muxSeqFinishChan pmuxSeqIdx]) ]]
pmuxSeqNextFileE openFn txt rbsizeV seqvar idxvar =
EC.letIn "nextidx"
(EC.binop OAdd (EC.project "value" $ EC.variable idxvar) $ EC.constant $ CInt 1)
(EC.applyMany (EC.project "safe_at" $ EC.project "seq" $ EC.variable seqvar)
[ EC.variable "nextidx"
, ignoreE
, EC.lambda "f" $ EC.block
[ EC.applyMany closeFn [EC.variable "me", cntrlrPMuxChanIdE]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfiName n) [pmuxSeqIdx $ EC.variable "nextidx"]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfpName n) [pmuxSeqIdx $ EC.project "path" $ EC.variable "f"]
, EC.applyMany openFn [EC.variable "me", cntrlrPMuxChanIdE, EC.project "path" $ EC.variable "f", formatE, EC.constant $ CBool txt, modeE]
, pmuxSafeNextChan EC.unit rbsizeV]])
pmuxSeqIdx e = EC.record [("key", pmuxvar), ("value", e)]
-- External functions
cleanT = stripTUIDSpan $ case eSpec of
FileMuxEP _ _ _ -> muxFullT
FileMuxseqEP _ _ _ -> muxFullT
_ -> t
sourceHasRead = ("HasRead", TC.unit, TC.bool, Nothing)
sourceRead = ("Read", TC.unit, cleanT, Nothing)
sourceMuxHasRead = ("MuxHasRead", TC.int, TC.bool, Nothing)
sourceMuxRead = ("MuxRead", TC.int, cleanT, Nothing)
sourcePOrdHasRead = ("POrdHasRead", TC.unit, TC.bool, Nothing)
sourcePOrdRead = ("POrdRead", TC.unit, TC.int, Nothing)
sourcePolyHasRead = ("PDataHasRead", TC.int, TC.bool, Nothing)
sourcePolyRead = ("PDataRead", TC.int, TC.string, Nothing)
sinkHasWrite = ("HasWrite", TC.unit, TC.bool, Nothing)
sinkWrite = ("Write", cleanT, TC.unit, Nothing)
initE = case eSpec of
BuiltinEP _ _ -> EC.applyMany openBuiltinFn [sourceId n, argE, formatE]
FileEP _ txt _ -> openFnE openFileFn txt
NetworkEP _ txt _ -> openFnE openSocketFn txt
FileSeqEP _ txt _ -> openFileSeqFnE openFileFn txt
FileMuxEP _ txt _ -> openFileMuxChanFnE openFileFn txt
FileMuxseqEP _ txt _ -> openFileMuxSeqChanFnE openFileFn txt
PolyFileMuxEP _ txt _ orderpath _ -> openPolyFileFnE openFileFn orderpath txt
PolyFileMuxSeqEP _ txt _ orderpath _ -> openPolyFileSeqFnE openFileFn orderpath txt
_ -> error "Invalid endpoint argument"
openFnE openFn txt = EC.applyMany openFn [EC.variable "me", sourceId n, argE, formatE, EC.constant $ CBool txt, modeE]
openFileSeqFnE openFn txt =
EC.block [ EC.assign (cfiName n) (EC.constant $ CInt 0), openSeqNextFileE True False openFn txt]
openMuxSeqIdx e = EC.record [("key", EC.variable $ cmcName n), ("value", e)]
openFileMuxChanFnE openFn txt =
EC.applyMany (EC.project "iterate" argE)
[EC.lambda "f" $ EC.block
[ EC.applyMany openFn [EC.variable "me", globalMuxChanIdE, EC.project "path" $ EC.variable "f", formatE, EC.constant $ CBool txt, modeE]
, muxSafeRefreshChan False EC.unit $ EC.variable $ cmcName n
, EC.assign (cmcName n) $ EC.binop OAdd (EC.variable $ cmcName n) (EC.constant $ CInt 1) ]]
openFileMuxSeqChanFnE openFn txt =
EC.applyMany (EC.project "iterate" argE)
[ EC.lambda "seqc" $
EC.applyMany (EC.project "safe_at" $ EC.project "seq" $ EC.variable "seqc")
[ EC.constant $ CInt 0
, ignoreE
, EC.lambda "f" $ EC.block
[ EC.applyMany (EC.project "insert" $ EC.variable $ cfiName n) [openMuxSeqIdx $ EC.constant $ CInt 0]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfpName n) [openMuxSeqIdx $ EC.project "path" $ EC.variable "f"]
, EC.applyMany openFn [EC.variable "me", globalMuxChanIdE, EC.project "path" $ EC.variable "f", formatE, EC.constant $ CBool txt, modeE]
, muxSafeRefreshChan False EC.unit $ EC.variable $ cmcName n
, EC.assign (cmcName n) $ EC.binop OAdd (EC.variable $ cmcName n) (EC.constant $ CInt 1) ]]]
{- Order file constants for polyfiles. -}
orderFormatE = EC.constant $ CString "csv"
orderTxtE = EC.constant $ CBool True
orderPathE orderpath = if (not $ null orderpath) && (isPunctuation $ head orderpath)
then EC.constant $ CString orderpath
else EC.variable orderpath
openPolyFileFnE openFn orderpath txt =
EC.block [
EC.applyMany openFn [EC.variable "me", pmuxOrderChanIdE, orderPathE orderpath, orderFormatE, orderTxtE, modeE],
EC.applyMany (EC.project "iterate" argE)
[ EC.lambda "f" $ EC.block
[ EC.applyMany openFn [EC.variable "me", globalMuxChanIdE, EC.project "path" $ EC.variable "f", formatE, EC.constant $ CBool txt, modeE]
, EC.assign (cmcName n) $ EC.binop OAdd (EC.variable $ cmcName n) (EC.constant $ CInt 1) ]]]
openPolyFileSeqFnE openFn orderpath txt =
EC.block [
EC.applyMany openFn [EC.variable "me", pmuxOrderChanIdE, orderPathE orderpath, orderFormatE, orderTxtE, modeE],
EC.applyMany (EC.project "iterate" argE)
[ EC.lambda "seqc" $
EC.applyMany (EC.project "safe_at" $ EC.project "seq" $ EC.variable "seqc")
[ EC.constant $ CInt 0
, ignoreE
, EC.lambda "f" $ EC.block
[ EC.applyMany (EC.project "insert" $ EC.variable $ cfiName n) [openMuxSeqIdx $ EC.constant $ CInt 0]
, EC.applyMany (EC.project "insert" $ EC.variable $ cfpName n) [openMuxSeqIdx $ EC.project "path" $ EC.variable "f"]
, EC.applyMany openFn [EC.variable "me", globalMuxChanIdE, EC.project "path" $ EC.variable "f", formatE, EC.constant $ CBool txt, modeE]
, EC.assign (cmcName n) $ EC.binop OAdd (EC.variable $ cmcName n) (EC.constant $ CInt 1) ]]]]
modeE = EC.constant . CString $ if isSource then "r" else "w"
startE = case eSpec of
BuiltinEP _ _ -> fileStartE
FileEP _ _ _ -> fileStartE
NetworkEP _ _ _ -> EC.applyMany registerSocketDataTriggerFn [sourceId n, EC.variable $ ccName n]
FileSeqEP _ _ _ -> fileStartE
FileMuxEP _ _ _ -> fileStartE
FileMuxseqEP _ _ _ -> fileStartE
PolyFileMuxEP _ _ _ _ _ -> fileStartE
PolyFileMuxSeqEP _ _ _ _ _ -> fileStartE
_ -> error "Invalid endpoint argument"
fileStartE = EC.send (EC.variable $ ccName n) myAddr EC.unit
closeE = case eSpec of
FileMuxEP _ _ _ -> closeMuxE
FileMuxseqEP _ _ _ -> closeMuxE
PolyFileMuxEP _ _ _ _ _ -> closePMuxE
PolyFileMuxSeqEP _ _ _ _ _ -> closePMuxE
_ -> EC.applyMany closeFn [EC.variable "me", sourceId n]
closeMuxE = EC.applyMany (EC.project "iterate" $ EC.applyMany (EC.variable "range") [EC.variable $ cmcName n])
[EC.lambda "r" $ EC.applyMany closeFn [EC.variable "me", muxChanIdE $ EC.project "elem" $ EC.variable "r"]]
closePMuxE = EC.block [ EC.applyMany closeFn [EC.variable "me", pmuxOrderChanIdE], closeMuxE ]
controlE processE = case eSpec of
BuiltinEP _ _ -> fileControlE processE
FileEP _ _ _ -> fileControlE processE
NetworkEP _ _ _ -> processE
FileSeqEP _ _ _ -> fileControlE processE
FileMuxEP _ _ _ -> fileControlE processE
FileMuxseqEP _ _ _ -> fileControlE processE
PolyFileMuxEP _ _ _ _ _ -> fileControlE processE
PolyFileMuxSeqEP _ _ _ _ _ -> fileControlE processE
_ -> error "Invalid endpoint argument"
fileControlE processE = EC.block [processE, controlRcrE]
controlRcrE = EC.send (EC.variable $ ccName n) myAddr EC.unit
sourceId n' = EC.constant $ CString n'
ignoreE = EC.variable "ignore"
-- | Rewrites a source declaration's process method to access and
-- dispatch the next available event to all its bindings.
bindSource :: [(Identifier, EndpointSpec)] -> [(Identifier, Identifier)] -> K3 Declaration -> (K3 Declaration, [K3 Declaration])
bindSource specs bindings d
    -- Only source globals (a DGlobal whose type is tagged TSource) get a
    -- dispatch function; every other declaration passes through unchanged.
    | DGlobal src t eOpt <- tag d
    , TSource <- tag t
    = (d, mkDispatchFn src eOpt t)
    | otherwise = (d, [])
  where
    -- | Constructs a dispatch function declaration for a source.
    -- Mux / poly-mux endpoints get a "feed" function typed by the source
    -- type's first child; any other known endpoint gets a "process"
    -- function; a source with no spec contributes nothing.
    mkDispatchFn n eOpt t = case lookup n specs of
      Just (FileMuxEP _ _ _) -> [mkFeedFn n $ head $ children t]
      Just (FileMuxseqEP _ _ _) -> [mkFeedFn n $ head $ children t]
      Just (PolyFileMuxEP _ _ _ _ _) -> [mkFeedFn n $ head $ children t]
      Just (PolyFileMuxSeqEP _ _ _ _ _) -> [mkFeedFn n $ head $ children t]
      Just _ -> [mkProcessFn n eOpt]
      Nothing -> []

    -- Process-function declaration (unit -> unit) for source n.
    mkProcessFn n eOpt = builtinGlobal (cpName n) (qualifyT unitFnT) (Just . qualifyE $ pbody n eOpt)

    -- Feed-function declaration (event type -> unit) for source n.
    mkFeedFn n t = builtinGlobal (cfName n) (qualifyT $ feedFnT t) (Just . qualifyE $ fbody n)

    -- Process body: fetch the next event (nextE) and dispatch it.
    pbody n eOpt = EC.lambda "_" $ EC.applyMany (processFnE n) [nextE n eOpt]
    fbody n = processFnE n

    -- Sends the bound "next" value to every trigger bound to source n.
    processFnE n = EC.lambda "next" $ EC.block $
      map (\ (_, dest) -> sendNextE dest) $ filter ((n ==) . fst) bindings

    -- The next event: either the endpoint-supplied expression, or an
    -- application of the source's read function to unit.
    nextE _ (Just e) = stripEUIDSpan e
    nextE n Nothing = EC.applyMany (EC.variable $ crName n) [EC.unit]

    sendNextE dest = EC.send (EC.variable dest) myAddr (EC.variable "next")

    unitFnT = TC.function TC.unit TC.unit
    feedFnT argT = TC.function argT TC.unit
-- | Constructs an "atInit" expression for initializing and starting sources.
-- | Expression that initializes the named source (its @ci@ function)
-- and then starts it (its @cs@ function), applying each to unit.
mkRunSourceE :: Identifier -> K3 Expression
mkRunSourceE n = EC.block [EC.applyMany (EC.variable $ ciName n) [EC.unit],
                           EC.applyMany (EC.variable $ csName n) [EC.unit]]
-- | Constructs an "atInit" expression for initializing sinks.
-- | Expression that initializes the named sink by applying its
-- @ci@ function to unit (sinks need no separate start step here).
mkRunSinkE :: Identifier -> K3 Expression
mkRunSinkE n = EC.applyMany (EC.variable $ ciName n) [EC.unit]
-- TODO: at_exit function body
-- | Prepends built-in peer declarations (peer address, peer collection,
-- role set) to the default role; any other declaration is returned
-- unchanged.
declareBuiltins :: K3 Declaration -> K3 Declaration
declareBuiltins d
    | DRole n <- tag d, n == defaultRoleName = replaceCh d new_children
    | otherwise = d
  where
    -- Builtins go first so they precede user declarations in the role.
    new_children = peerDecls ++ (children d)

    -- Peer-level globals: this peer's address, the collection of peers,
    -- and the set of role names.
    peerDecls = [
        mkGlobal myId TC.address Nothing,
        mkGlobal peersId peersT Nothing,
        mkGlobal roleId roleT Nothing]

    peersT = mkCollection [("addr", TC.address)] "Collection"
    roleT = mkCollection [(roleElemLbl, TC.string)] "Set"

    -- Global with qualified type, and qualified initializer when present.
    mkGlobal n t eOpt = builtinGlobal n (qualifyT t) $ maybe Nothing (Just . qualifyE) eOpt

    mkCurriedFnT tl = foldr1 TC.function tl

    --mkAUnitFnT at = TC.function at TC.unit
    --mkRUnitFnT rt = TC.function TC.unit rt
    --unitFnT = TC.function TC.unit TC.unit

    -- Collection type over the given record fields, annotated with ann.
    mkCollection fields ann = (TC.collection $ TC.record $ map (qualifyT <$>) fields) @+ TAnnotation ann
| DaMSL/K3 | src/Language/K3/Parser/ProgramBuilder.hs | apache-2.0 | 31,160 | 0 | 22 | 8,284 | 10,022 | 5,128 | 4,894 | 539 | 52 |
{-# LANGUAGE TemplateHaskell #-}
module Cluster where
import Control.Lens
import Control.Lens.TH
import Control.Concurrent.STM
-- | A cluster, currently characterised only by its size.
-- The underscore-prefixed field is picked up by @makeLenses@ below,
-- which generates a @size@ lens for it.
newtype Cluster = Cluster {
    _size :: Int  -- ^ number of members in the cluster (units not shown here — confirm with callers)
  }
makeLenses ''Cluster | kdkeyser/halvin | src/halvin/Cluster.hs | apache-2.0 | 196 | 0 | 6 | 29 | 43 | 27 | 16 | 8 | 0 |
module SSync.JSVector (Vector, (!), create, empty) where
import qualified SSync.JSVectorM as VM
import System.IO.Unsafe (unsafePerformIO)
import SSync.JSVector.Internal
-- | An immutable view over a mutable JS vector; reads go through '(!)'.
newtype Vector = Vector VM.Vector
-- | Elements can only be read via the FFI, so 'show' renders an opaque
-- placeholder rather than the contents.
instance Show Vector where
  show _ = "[...]"
-- | Raw JavaScript indexing (@$1[$2]@); no bounds checking is performed.
foreign import javascript unsafe "$1[$2]" idx :: VM.Vector -> Int -> Int
-- | Unchecked element read, delegating directly to the JS indexer.
(!) :: Vector -> Int -> Int
(!) (Vector arr) position = idx arr position
{-# INLINE (!) #-}
-- This unsafePerformIO / unsafeFreeze is safe as long as users don't
-- break the VectorMonad abstraction (which is why it lives in
-- SSync.JSVector.Internal instead of SSync.JSVectorM).
-- | Run a vector-building action and freeze its result into an
-- immutable 'Vector' (see the safety note above on 'unsafePerformIO').
create :: VectorMonad VM.Vector -> Vector
create = unsafeFreeze . unsafePerformIO . runVectorMonad
-- | Reinterpret a mutable vector as immutable without copying; callers
-- must guarantee the argument is never mutated afterwards.
unsafeFreeze :: VM.Vector -> Vector
unsafeFreeze = Vector
-- | The empty vector; NOINLINE so the shared top-level value is built
-- only once rather than re-created at each use site.
empty :: Vector
empty = create (VM.new 0)
{-# NOINLINE empty #-}
| socrata-platform/ssync | src/main/haskell/SSync/JSVector.hs | apache-2.0 | 818 | 9 | 8 | 134 | 212 | 120 | 92 | 18 | 1 |
-- Copyright 2020 Google LLC
--
-- Licensed under the Apache License, Version 2.0 (the "License");
-- you may not use this file except in compliance with the License.
-- You may obtain a copy of the License at
--
-- http://www.apache.org/licenses/LICENSE-2.0
--
-- Unless required by applicable law or agreed to in writing, software
-- distributed under the License is distributed on an "AS IS" BASIS,
-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-- See the License for the specific language governing permissions and
-- limitations under the License.
module Chain3Dupe where
import Chain2Dupe ()
-- | Append the string @"f"@ to the file at the given path, creating the
-- file if it does not exist ('appendFile' semantics).
chain3Dupe :: FilePath -> IO ()
chain3Dupe f = appendFile f "f"
| google/hrepl | hrepl/tests/Chain3Dupe.hs | apache-2.0 | 668 | 0 | 5 | 115 | 36 | 26 | 10 | 3 | 1 |
import Data.Monoid
{-comp :: String -> String -> Ordering-}
{-comp xs ys-}
{-| length xs < length ys = LT-}
{-| length xs > length ys = GT-}
{-| otherwise = xs `compare` ys-}
-- | Order two strings by length first, then by vowel count, then
-- lexicographically; each later criterion only breaks ties in the
-- earlier ones (via the 'Ordering' monoid).
comp' :: String -> String -> Ordering
comp' a b =
    (length a `compare` length b)
    `mappend` (countVowels a `compare` countVowels b)
    `mappend` (a `compare` b)
  where
    countVowels = length . filter (`elem` "aeiou")
| paulbarbu/haskell-ground | monoid.hs | apache-2.0 | 518 | 0 | 10 | 178 | 127 | 73 | 54 | 8 | 1 |
{-# LANGUAGE FlexibleContexts #-}
module HaskHOL.Lib.IndTypes.Pre where
import HaskHOL.Core hiding (typeOf, rights, lefts)
import HaskHOL.Core.Kernel (typeOf)
import qualified HaskHOL.Core.State as S (mkType)
import HaskHOL.Deductive hiding (getDefinition, getSpecification, newDefinition)
import HaskHOL.Lib.Pair
import HaskHOL.Lib.WF
import HaskHOL.Lib.IndTypesPre
-- | The HOL terms @0@ and @SUC@, used to construct Peano numerals.
tmZERO, tmSUC :: WFCtxt thry => HOL cls thry HOLTerm
tmZERO = serve [wf| 0 |]
tmSUC = serve [wf| SUC |]
-- | @sucivate n@ builds the numeral term @SUC (... (SUC 0))@ with
-- @n@ applications of @SUC@.
sucivate :: WFCtxt thry => Int -> HOL cls thry HOLTerm
sucivate n = funpowM n (mkComb tmSUC) =<< toHTm tmZERO
-- | Eliminates an equational assumption @eq@ from a theorem: after
-- applying the substitutions accumulated so far, the equation is
-- discharged and resolved by instantiating its left side with its right
-- side.  The new binding is added to the returned environment.
ruleSCRUB_EQUATION :: BoolCtxt thry => HOLTerm -> (HOLThm, HOLTermEnv)
                   -> HOL cls thry (HOLThm, HOLTermEnv)
ruleSCRUB_EQUATION eq (th, insts) =
    do -- apply each previously collected (l, r) substitution in turn
       eq' <- foldrM subst eq (map (: []) insts)
       (l, r) <- destEq eq'
       th' <- ruleDISCH eq' th
       th'' <- ruleMP (primINST [(l, r)] th') $ primREFL r
       return (th'', (l, r):insts)
-- | Builds a model of the requested inductive types inside @recspace@:
-- each constructor is defined via @CONSTR@ over an injection of its
-- non-recursive arguments (paired with epsilon placeholders) and an
-- @FCONS@/@BOTTOM@ list of its recursive arguments.  An inductive
-- membership predicate is then derived from the constructor rules.
-- Returns the assumed constructor definitions, the rule theorem, and
-- the induction theorem conjunct.
justifyInductiveTypeModel :: WFCtxt thry
                          => [(HOLType, [(Text, [HOLType])])]
                          -> HOL cls thry ([HOLThm], HOLThm, HOLThm)
justifyInductiveTypeModel def =
    do tTm <- serve [wf| T |]
       nTm <- serve [wf| n:num |]
       bepsTm <- serve [wf| @x:bool. T |]
       let (newtys, rights) = unzip def
           tyargls = foldr ((++) . map snd) [] rights
           -- all argument types that are not among the newly defined ones
           alltys = foldr (munion . flip (\\) newtys) [] tyargls
       -- one epsilon-term placeholder per external argument type
       epstms <- mapM (\ ty -> mkSelect (mkVar "v" ty) tTm) alltys
       -- payload type: right-nested product of the external types
       -- (bool when there are none)
       pty <- foldr1M (\ ty1 ty2 -> mkType "prod" [ty1, ty2]) alltys <|>
              return tyBool
       recty <- mkType "recspace" [pty]
       constr <- mkConst "CONSTR" [(tyA, pty)]
       fcons <- mkConst "FCONS" [(tyA, recty)]
       bot <- mkConst "BOTTOM" [(tyA, pty)]
       bottail <- mkAbs nTm bot
       let mkConstructor :: WFCtxt thry
                         => Int -> (Text, [HOLType]) -> HOL cls thry HOLTerm
           -- Builds the defining equation of the n-th constructor.
           mkConstructor n (cname, cargs) =
             let ttys = map (\ ty -> if ty `elem` newtys
                                     then recty else ty) cargs
                 args = mkArgs "a" [] ttys
                 (rargs, iargs) = partition (\ t -> typeOf t == recty) args
                 -- Threads the non-recursive arguments into the epsilon
                 -- placeholders, keeping the placeholder when no argument
                 -- of the right type remains.
                 mkInjector :: MonadCatch m => [HOLTerm] -> [HOLType]
                            -> [HOLTerm] -> m [HOLTerm]
                 mkInjector _ [] _ = return []
                 mkInjector (tm:tms) (ty:tys) is =
                     (do (a, iargs') <- remove (\ t -> typeOf t == ty) is
                         tl <- mkInjector tms tys iargs'
                         return (a:tl))
                     <|> (do tl <- mkInjector tms tys is
                             return (tm:tl))
                 mkInjector _ _ _ = fail' "mkInjector" in
               do iarg <- (foldr1M mkPair =<< mkInjector epstms alltys iargs) <|>
                          return bepsTm
                  rarg <- foldrM (mkBinop fcons) bottail rargs
                  conty <- foldrM mkFunTy recty $ map typeOf args
                  n' <- sucivate n
                  condef <- listMkComb constr [n', iarg, rarg]
                  mkEq (mkVar cname conty) =<< listMkAbs args condef
           -- Numbers the constructors sequentially across all types.
           mkConstructors :: WFCtxt thry => Int -> [(Text, [HOLType])]
                          -> HOL cls thry [HOLTerm]
           mkConstructors _ [] = return []
           mkConstructors n (x:xs) =
             do hd <- mkConstructor n x
                tl <- mkConstructors (n + 1) xs
                return (hd:tl)
       condefs <- mkConstructors 0 $ concat rights
       conths <- mapM primASSUME condefs
       predty <- mkFunTy recty tyBool
       let edefs = foldr (\ (x, l) acc -> map (\ t -> (x, t)) l ++ acc) [] def
       idefs <- map2 (\ (r, (_, atys)) d -> ((r, atys), d)) edefs condefs
       let mkRule :: ((HOLType, [HOLType]), HOLTerm)
                  -> HOL cls thry HOLTerm
           -- Membership rule for one constructor: recursive arguments
           -- must already satisfy their type's predicate.
           mkRule ((r, a), condef) =
             do (left, right) <- destEq condef
                let (args, _) = stripAbs right
                lapp <- listMkComb left args
                conds <- foldr2M (\ arg argty sofar ->
                           if argty `elem` newtys
                           then do ty' <- destVarType argty
                                   arg' <- mkComb (mkVar ty' predty) arg
                                   return (arg':sofar)
                           else return sofar) [] args a
                ty' <- destVarType r
                conc <- mkComb (mkVar ty' predty) lapp
                rule <- if null conds then return conc
                        else flip mkImp conc =<< listMkConj conds
                listMkForall args rule
       rules <- listMkConj =<< mapM mkRule idefs
       th0 <- deriveNonschematicInductiveRelations rules
       th1 <- proveMonotonicityHyps th0
       (th2a, th2bc) <- ruleCONJ_PAIR th1
       th2b <- ruleCONJUNCT1 th2bc
       return (conths, th2a, th2b)
  where -- union that keeps duplicates balanced (multiset-style union)
        munion :: Eq a => [a] -> [a] -> [a]
        munion s1 = try' . munion' s1
        munion' :: (Eq a, MonadCatch m) => [a] -> [a] -> m [a]
        munion' [] s2 = return s2
        munion' (h1:s1') s2 =
            (do (_, s2') <- remove (== h1) s2
                tl <- munion' s1' s2'
                return (h1:tl))
            <|> do tl <- munion' s1' s2
                   return (h1:tl)
-- | Proves that every predicate in the rule theorem is inhabited by
-- saturating the base (unconditional) rules under the implicational
-- ones until each predicate has a witness; returns one witness theorem
-- per predicate, in predicate order.
proveModelInhabitation :: BoolCtxt thry => HOLThm -> HOL cls thry [HOLThm]
proveModelInhabitation rth =
    do srules <- mapM ruleSPEC_ALL =<< ruleCONJUNCTS rth
       let (imps, bases) = partition (isImp . concl) srules
       impConcs <- mapM (rand . concl) imps
       let concs = map concl bases ++ impConcs
       preds <- liftM setify $ mapM (repeatM rator) concs
       ithms <- exhaustInhabitations imps bases
       mapM (\ p -> find (\ th -> (fst . stripComb $ concl th) == p) ithms)
            preds
  where -- Repeatedly fires any rule whose conclusion's predicate is not
        -- yet witnessed, until no such rule remains.
        exhaustInhabitations :: BoolCtxt thry => [HOLThm] -> [HOLThm]
                             -> HOL cls thry [HOLThm]
        exhaustInhabitations ths sofar =
            let dunnit = setify $ map (fst . stripComb . concl) sofar in
              do useful <- filterM (\ (Thm _ c) ->
                             do c' <- (fst . stripComb) `fmap` rand c
                                return $! c' `notElem` dunnit) ths
                 if null useful then return sofar
                 else do newth <- tryFind followHorn useful
                         exhaustInhabitations ths (newth:sofar)
          where -- Discharges a Horn rule by conjoining already-proved
                -- witnesses for each of its antecedent predicates.
                followHorn :: BoolCtxt thry => HOLThm -> HOL cls thry HOLThm
                followHorn thm =
                    do preds <- liftM (map (fst . stripComb) . conjuncts) .
                                  lHand $ concl thm
                       asms <- mapM (\ p ->
                                 find (\ (Thm _ c') -> fst (stripComb c') == p)
                                      sofar) preds
                       ruleMATCH_MP thm $ foldr1M ruleCONJ asms
-- | Introduces one new type from its inhabitation theorem via
-- 'newBasicTypeDefinition', after rewriting the witness predicate with
-- the constructor definitions and scrubbing equational assumptions.
-- Returns the two type bijection theorems (the second one re-expressed
-- in terms of the original predicate).
defineInductiveType :: BoolCtxt thry => [HOLThm] -> HOLThm
                    -> HOL Theory thry (HOLThm, HOLThm)
defineInductiveType cdefs exth@(Thm asl extm) =
    let (epred@(Var ename _), _) = stripComb extm in
      do -- the assumption defining this type's predicate
         th1@(Thm _ c1) <- primASSUME =<<
                             findM (\ eq -> do eq' <- lHand eq
                                               return $! eq' == epred) asl
         th1' <- runConv (convSUBS cdefs) =<< rand c1
         th2 <- primTRANS th1 th1'
         th2' <- ruleAP_THM th2 =<< rand extm
         th3@(Thm asl3 _) <- primEQ_MP th2' exth
         (th4, _) <- foldrM ruleSCRUB_EQUATION (th3, []) asl3
         -- generated names follow the _mk_<ty> / _dest_<ty> convention
         let mkname = "_mk_" `append` ename
             destname = "_dest_" `append` ename
         (bij1, bij2@(Thm _ bc)) <- newBasicTypeDefinition ename mkname destname
                                      th4
         bij2a <- ruleAP_THM th2 =<< rand =<< rand bc
         bij2b <- primTRANS bij2a bij2
         return (bij1, bij2b)
defineInductiveType _ _ = error "defineInductiveType: exhaustive warning."
-- | Defines one abstract-level constructor from its membership rule:
-- recursive arguments are passed through the destructor of their type,
-- the model-level constructor is applied, and the result is injected
-- back with the new type's @mk@ function.  Returns the definition
-- theorem rewritten against the model-level constructor definition.
defineInductiveTypeConstructor :: PairCtxt thry => [HOLThm]
                               -> [(HOLTerm, (HOLTerm, HOLTerm))]
                               -> HOLThm -> HOL Theory thry HOLThm
defineInductiveTypeConstructor defs consindex (Thm _ c) =
    let (_, bod) = stripForall c in
      do -- antecedents exist only for constructors with recursive args
         asms <- if isImp bod then liftM conjuncts $ lHand bod else return []
         conc <- if isImp bod then rand bod else return bod
         asmlist <- mapM destComb asms
         (cpred, cterm) <- destComb conc
         let (oldcon, oldargs) = stripComb cterm
         (newrights, newargs) <- mapAndUnzipM (modifyArg asmlist) oldargs
         (retmk, _) <- cpred `assoc` consindex
         defbod <- mkComb retmk =<< listMkComb oldcon newrights
         defrt <- listMkAbs newargs defbod
         expth <- findM (\ (Thm _ c') -> do c'' <- lHand c'
                                            return $! c'' == oldcon) defs
         deflf@(Var name _) <- (\ (x, _) -> mkVar x $ typeOf defrt) =<<
                                 destVar oldcon
         rexpth <- runConv (convSUBS [expth]) defrt
         deftm <- mkEq deflf =<< rand (concl rexpth)
         defth <- newDefinition (name, deftm)
         primTRANS defth =<< ruleSYM rexpth
  where -- For a recursive argument, produce (dest applied to a fresh
        -- abstract-typed variable, the fresh variable); non-recursive
        -- arguments pass through unchanged.
        modifyArg :: HOLTermEnv -> HOLTerm -> HOL cls thry (HOLTerm, HOLTerm)
        modifyArg asmlist v =
            (do (_, dest) <- flip assoc consindex =<< v `revAssoc` asmlist
                ty' <- liftM (head . snd) . destType $ typeOf dest
                v' <- (\ (x, _) -> mkVar x ty') =<< destVar v
                v'' <- mkComb dest v'
                return (v'', v'))
            <|> return (v, v)
defineInductiveTypeConstructor _ _ _ =
    error "defineInductiveTypeConstructor: exhaustive warning."
-- | Specializes the model-level induction theorem: each quantified
-- predicate variable is instantiated with a lambda asserting both the
-- original membership and the user predicate applied through the
-- type's @mk@ function.
instantiateInductionTheorem :: BoolCtxt thry => [(HOLTerm, (HOLTerm, HOLTerm))]
                            -> HOLThm -> HOL cls thry HOLThm
instantiateInductionTheorem consindex ith =
    let (avs, bod) = stripForall (concl ith) in
      do corlist <- mapM ((repeatM rator `ffCombM` repeatM rator) <=<
                          destImp <=< body <=< rand) =<<
                      liftM conjuncts (rand bod)
         consindex' <- mapM (\ v -> do w <- v `revAssoc` corlist
                                       r' <- w `assoc` consindex
                                       return (w, r')) avs
         recty <- liftM (head . snd) . destType . typeOf . fst . snd $
                    head consindex
         newtys <- mapM (liftM (head . snd) . destType . typeOf . snd . snd)
                     consindex'
         ptypes <- mapM (`mkFunTy` tyBool) newtys
         let preds = mkArgs "P" [] ptypes
             args = mkArgs "x" [] $ map (const recty) preds
         -- \x. r x /\ P (m x) for each (predicate, mk) pair
         lambs <- map2M (\ (r, (m, _)) (p, a) ->
                           do l <- mkComb r a
                              cnj <- mkConj l =<< mkComb p =<< mkComb m a
                              mkAbs a cnj) consindex' $ zip preds args
         ruleSPECL lambs ith
-- | Transfers one clause of the instantiated induction theorem from the
-- model level to the abstract level, using the constructor definitions
-- and type bijections.  Clauses with antecedents (recursive
-- constructors) and unconditional clauses are handled separately.
pullbackInductionClause :: BoolCtxt thry => [(HOLThm, HOLThm)] -> [HOLThm]
                        -> HOLThm -> HOLTerm -> HOL cls thry HOLThm
pullbackInductionClause tybijpairs conthms rthm tm =
    let (avs, bimp) = stripForall tm in
      case bimp of
        (ant :==> con) ->
            do ths <- mapM (ruleCONV convBETA) =<<
                        ruleCONJUNCTS (primASSUME ant)
               (tths, pths) <- mapAndUnzipM ruleCONJ_PAIR ths
               tth <- ruleMATCH_MP (ruleSPEC_ALL rthm) $ foldr1M ruleCONJ tths
               mths <- mapM ruleIP (tth:tths)
               conth1 <- runConv convBETA con
               contm1 <- rand $ concl conth1
               cth2 <- runConv (convSUBS (tail mths)) =<< rand contm1
               conth2 <- primTRANS conth1 $
                           flip ruleAP_TERM cth2 =<< rator contm1
               conth3 <- rulePRE conth2
               let lctms = map concl pths
               lctms' <- listMkConj lctms
               asmin <- mkImp lctms' =<< rand =<< rand (concl conth3)
               argsin <- mapM rand =<< liftM conjuncts (lHand asmin)
               -- generalize the destructor applications to fresh variables
               argsgen <- mapM (\ x ->
                            do xname <- fst `fmap` (destVar =<< rand x)
                               return . mkVar xname $ typeOf x) argsin
               asmgen <- subst (zip argsin argsgen) asmin
               asmquant <- flip listMkForall asmgen =<<
                             liftM (snd . stripComb) (rand =<< rand asmgen)
               th0 <- ruleSPEC_ALL $ primASSUME asmquant
               th1 <- primINST (zip argsgen argsin) th0
               th2 <- ruleMP th1 =<< foldr1M ruleCONJ pths
               th2' <- ruleCONJ tth th2
               th3 <- primEQ_MP (ruleSYM conth3) th2'
               ruleDISCH asmquant . ruleGENL avs $ ruleDISCH ant th3
        con ->
            do conth2 <- runConv convBETA con
               tth <- rulePART_MATCH return rthm =<< lHand =<<
                        rand (concl conth2)
               conth3 <- rulePRE conth2
               asmgen <- rand =<< rand (concl conth3)
               asmquant <- flip listMkForall asmgen =<<
                             liftM (snd . stripComb) (rand asmgen)
               th2 <- ruleSPEC_ALL $ primASSUME asmquant
               th2' <- ruleCONJ tth th2
               th3 <- primEQ_MP (ruleSYM conth3) th2'
               ruleDISCH asmquant =<< ruleGENL avs th3
  where -- rewrite backwards with the constructor definitions
        rulePRE :: BoolCtxt thry => HOLThm -> HOL cls thry HOLThm
        rulePRE thm = do thms <- mapM ruleSYM conthms
                         ruleGEN_REWRITE (funpow 3 convRAND) thms thm
        -- rewrite backwards with the second type bijections
        ruleIP :: BoolCtxt thry => HOLThm -> HOL cls thry HOLThm
        ruleIP thm = ruleSYM $ ruleGEN_REWRITE id (map snd tybijpairs) thm
-- | Cleans up one conclusion of the pulled-back induction theorem:
-- specializes it to a destructed abstract value, discharges the trivial
-- membership side with reflexivity, rewrites with the type bijections,
-- and re-generalizes over the abstract variable.
finishInductionConclusion :: BoolCtxt thry => [(HOLTerm, (HOLTerm, HOLTerm))]
                          -> [(HOLThm, HOLThm)] -> HOLThm -> HOL cls thry HOLThm
finishInductionConclusion consindex tybijpairs th =
    do (_, bimp) <- destForall $ concl th
       pv <- lHand =<< body =<< rator =<< rand bimp
       (p, v) <- destComb pv
       (_, dest) <- p `assoc` consindex
       ty <- liftM (head . snd) . destType $ typeOf dest
       v' <- liftM (\ (x, _) -> mkVar x ty) $ destVar v
       dv <- mkComb dest v'
       th1 <- rulePRE =<< ruleSPEC dv th
       th2 <- ruleMP th1 $ primREFL =<< rand =<< lHand (concl th1)
       th3 <- ruleCONV convBETA th2
       ruleGEN v' =<< ruleFIN =<< ruleCONJUNCT2 th3
  where rulePRE :: BoolCtxt thry => HOLThm -> HOL cls thry HOLThm
        rulePRE = let (tybij1, tybij2) = unzip tybijpairs in
                    ruleGEN_REWRITE (convLAND . convLAND . convRAND) tybij1 <=<
                    ruleGEN_REWRITE convLAND tybij2
        ruleFIN :: BoolCtxt thry => HOLThm -> HOL cls thry HOLThm
        ruleFIN = let (tybij1, _) = unzip tybijpairs in
                    ruleGEN_REWRITE convRAND tybij1
-- | Assembles the final abstract induction theorem: pulls back every
-- clause, discharges the combined hypotheses, generalizes over the
-- predicates, and scrubs the remaining equational assumptions.
deriveInductionTheorem :: BoolCtxt thry => [(HOLTerm, (HOLTerm, HOLTerm))]
                       -> [(HOLThm, HOLThm)] -> [HOLThm] -> HOLThm -> HOLThm
                       -> HOL cls thry HOLThm
deriveInductionTheorem consindex tybijpairs conthms iith rth =
    do rths <- ruleCONJUNCTS rth
       bths <- map2M (pullbackInductionClause tybijpairs conthms) rths =<<
                 liftM conjuncts (lHand $ concl iith)
       asm <- listMkConj =<< mapM (lHand . concl) bths
       ths <- map2M ruleMP bths =<< ruleCONJUNCTS (primASSUME asm)
       th1 <- ruleMP iith $ foldr1M ruleCONJ ths
       th2 <- foldr1M ruleCONJ =<<
                mapM (finishInductionConclusion consindex tybijpairs) =<<
                  ruleCONJUNCTS th1
       th3 <- ruleDISCH asm th2
       preds <- mapM (rator <=< body <=< rand) =<<
                  liftM conjuncts (rand $ concl th3)
       th4 <- ruleGENL preds th3
       -- hypotheses mentioning the model predicates get discharged
       pasms <- filterM (liftM (flip elem (map fst consindex)) . lHand) $ hyp th4
       th5 <- foldrM ruleDISCH th4 pasms
       (th6, _) <- foldrM ruleSCRUB_EQUATION (th5, []) $ hyp th5
       th7 <- ruleUNDISCH_ALL th6
       liftM fst . foldrM ruleSCRUB_EQUATION (th7, []) $ hyp th7
-- | Builds the characterizing theorem for recursive functions over the
-- new types: one function symbol per type is assumed equal to a common
-- @fn@ over the model type composed with that type's destructor, and
-- the constructor equations are transported through those assumptions.
createRecursiveFunctions :: BoolCtxt thry => [(HOLThm, HOLThm)]
                         -> [(HOLTerm, (HOLTerm, HOLTerm))] -> [HOLThm]
                         -> HOLThm -> HOL cls thry HOLThm
createRecursiveFunctions tybijpairs consindex conthms rth =
    do domtys <- mapM (liftM (head . snd) . destType . typeOf . snd . snd)
                   consindex
       recty <- liftM (head . snd) . destType . typeOf . fst . snd $
                  head consindex
       let ranty = mkVarType "Z"
       fnty <- mkFunTy recty ranty
       fn <- mkVar "fn" fnty
       fns <- liftM (mkArgs "fn" []) $ mapM (`mkFunTy` ranty) domtys
       let args = mkArgs "a" [] domtys
       -- fn_i = \a. fn (dest_i a)
       rights <- map2M (\ (_, (_, d)) a ->
                          mkAbs a . mkComb fn $ mkComb d a) consindex args
       eqs <- map2M mkEq fns rights
       fdefs <- mapM primASSUME eqs
       fxths1 <- mapM (\ th1 -> tryFind (`primMK_COMB` th1) fdefs) conthms
       fxths2 <- mapM (\ th -> do th' <- runConv convBETA =<< rand (concl th)
                                  primTRANS th th') fxths1
       rths <- ruleCONJUNCTS rth
       fxths3 <- map2M simplifyFxthm rths fxths2
       fxths4 <- map2M (\ th1 -> primTRANS th1 . ruleAP_TERM fn) fxths2 fxths3
       fxth5 <- foldr1M ruleCONJ =<< map2M (cleanupFxthm fn) conthms fxths4
       pasms <- filterM (liftM (flip elem (map fst consindex)) . lHand) $
                  hyp fxth5
       fxth6 <- foldrM ruleDISCH fxth5 pasms
       (fxth7, _) <- foldrM ruleSCRUB_EQUATION (fxth6, []) $
                       foldr (union . hyp) [] conthms
       fxth8 <- ruleUNDISCH_ALL fxth7
       (fxth9, _) <- foldrM ruleSCRUB_EQUATION (fxth8, []) (hyp fxth8 \\ eqs)
       return fxth9
  where -- combines a type-bijection pair into a single rewrite
        mkTybijcons :: (HOLThm, HOLThm) -> HOL cls thry HOLThm
        mkTybijcons (th1, th2) =
            do tms <- pairMapM (rand <=< lHand . concl) (th2, th1)
               th3 <- primINST [tms] th2
               c <- rator =<< lHand =<< rand (concl th2)
               th4 <- ruleAP_TERM c th1
               primEQ_MP (ruleSYM th3) th4
        convS :: BoolCtxt thry => Conversion cls thry
        convS = convGEN_REWRITE id (map mkTybijcons tybijpairs)
        ruleE :: BoolCtxt thry => HOLThm -> HOL cls thry HOLThm
        ruleE = ruleGEN_REWRITE id (map snd tybijpairs)
        -- matches a rule theorem against the model-level redex,
        -- discharging membership side conditions when the rule is
        -- implicational
        simplifyFxthm :: BoolCtxt thry => HOLThm -> HOLThm
                      -> HOL cls thry HOLThm
        simplifyFxthm rthm fxth =
            do pat <- funpowM 4 rand $ concl fxth
               rtm <- repeatM (liftM snd . destForall) $ concl rthm
               if isImp rtm
               then do th1 <- rulePART_MATCH (rand <=< rand) rthm pat
                       tms1 <- liftM conjuncts . lHand $ concl th1
                       ths2 <- mapM (\ t -> primEQ_MP (ruleSYM $ runConv convS t)
                                              thmTRUTH) tms1
                       ruleE =<< ruleMP th1 =<< foldr1M ruleCONJ ths2
               else ruleE =<< rulePART_MATCH rand rthm pat
        cleanupFxthm :: HOLTerm -> HOLThm -> HOLThm -> HOL cls thry HOLThm
        cleanupFxthm fn cth fxth =
            do tms <- liftM (snd . stripComb) $ rand =<< rand (concl fxth)
               kth <- ruleRIGHT_BETAS tms $ primASSUME (head $ hyp cth)
               primTRANS fxth $ ruleAP_TERM fn kth
-- | From one constructor's defining assumption, builds the clause
-- @\i r s. f' <non-recursive args> <recursive results> <s-indexed args>@
-- used to instantiate the model recursion theorem; @i@ is the injected
-- payload, @r@ the recursive-subterm function, and @s@ the result
-- function over numerals.
createRecursionIsoConstructor :: WFCtxt thry
                              => [(HOLTerm, (HOLTerm, HOLTerm))] -> HOLThm
                              -> HOL cls thry HOLTerm
createRecursionIsoConstructor consindex cth =
    do numty <- S.mkType "num" ([]::[HOLType])
       let zty = mkVarType "Z"
       s <- liftM (mkVar "s") $ mkFunTy numty zty
       recty <- liftM (head . snd) . destType . typeOf . fst $ head consindex
       domty <- liftM (head . snd) $ destType recty
       let i = mkVar "i" domty
       r <- liftM (mkVar "r") $ mkFunTy numty recty
       -- index the mk functions by their result type
       let mks = map (fst . snd) consindex
           mkindex = map (\ t -> (head . tail . snd . try' . destType $
                                    typeOf t, t)) mks
       artms <- (snd . stripComb) `fmap` (rand =<< rand (concl cth))
       artys <- mapFilterM (fmap typeOf . rand) artms
       (args, bod) <- liftM stripAbs . rand . head $ hyp cth
       (ccitm, rtm) <- destComb bod
       (_, itm) <- destComb ccitm
       -- arguments free in the recursive tail are the recursive ones
       let (rargs, iargs) = partition (`freeIn` rtm) args
       xths <- mapM (extractArg itm) iargs
       cargs' <- mapM (subst [(itm, i)] <=< lHand . concl) xths
       indices <- mapM sucivate [0..(length rargs - 1)]
       rindexed <- mapM (mkComb r) indices
       rargs' <- map2M (\ a rx -> flip mkComb rx =<<
                          (a `assoc` mkindex)) artys rindexed
       sargs' <- mapM (mkComb s) indices
       let allargs = cargs' ++ rargs' ++ sargs'
       funty <- foldrM (mkFunTy . typeOf) zty allargs
       funname <- liftM fst $ destConst =<< repeatM rator =<< lHand (concl cth)
       let funarg = mkVar (funname `snoc` '\'') funty
       listMkAbs [i, r, s] =<< listMkComb funarg allargs
  where -- projects variable v out of a nested-pair tuple using FST/SND
        extractArg :: PairCtxt thry => HOLTerm -> HOLTerm
                   -> HOL cls thry HOLThm
        extractArg tup v
            | v == tup = primREFL tup
            | otherwise =
                do (t1, t2) <- destPair tup
                   thPAIR <- ruleISPECL [t1, t2] $ if v `freeIn` t1 then thmFST
                                                   else thmSND
                   tup' <- rand $ concl thPAIR
                   if tup' == v
                   then return thPAIR
                   else ruleSUBS [ruleSYM thPAIR] =<< extractArg tup' v
-- | Derive the recursion theorem for a newly defined inductive type from
-- the type bijection pairs, the constructor index, the constructor
-- theorems, and the raw recursion axiom 'rath'.  Instantiates
-- CONSTR_REC with the FCONS list of per-constructor iso terms, cleans
-- each conjunct up ('hackdownRath'), discharges the sequence-existence
-- hypotheses, and generalises over the clause functions.
deriveRecursionTheorem :: IndTypesPreCtxt thry
                       => [(HOLThm, HOLThm)]
                       -> [(HOLTerm, (HOLTerm, HOLTerm))] -> [HOLThm]
                       -> HOLThm -> HOL cls thry HOLThm
deriveRecursionTheorem tybijpairs consindex conthms rath =
    do isocons <- mapM (createRecursionIsoConstructor consindex) conthms
       let ty = typeOf $ head isocons
       fcons <- mkConst "FCONS" [(tyA, ty)]
       fnil <- mkConst "FNIL" [(tyA, ty)]
       -- One FCONS-list bundling every constructor clause.
       bigfun <- foldrM (mkBinop fcons) fnil isocons
       eth <- ruleISPEC bigfun thmCONSTR_REC
       fn <- rator =<< rand (head . conjuncts $ concl rath)
       (v, bod) <- destAbs =<< rand (concl eth)
       betm <- varSubst [(v, fn)] bod
       fnths <- mapM (\ t -> do t' <- bndvar =<< rand t
                                ruleRIGHT_BETAS [t'] $ primASSUME t) $ hyp rath
       rthm <- foldr1M ruleCONJ =<< mapM (hackdownRath betm fnths) =<<
                 ruleCONJUNCTS rath
       -- Hypotheses that are equations define the recursion sequences.
       let unseqs = filter isEq $ hyp rthm
       tys <- mapM (liftM (head . snd) . destType . typeOf . snd . snd)
                consindex
       seqs <- mapM (\ x -> findM (\ t ->
                 do t' <- lHand t
                    ty' <- liftM (head . snd) . destType $ typeOf t'
                    return $! ty' == x) unseqs) tys
       rethm <- foldrM ruleEXISTS_EQUATION rthm seqs
       fethm <- ruleCHOOSE fn eth rethm
       pcons <- mapM (repeatM rator <=< rand <=<
                      repeatM (liftM snd . destForall)) .
                  conjuncts $ concl rthm
       ruleGENL pcons fethm
  where convC :: IndTypesPreCtxt thry => Conversion cls thry
        -- Repeatedly unfold FCONS under three applications.
        convC = funpow 3 convRATOR . _REPEAT $
                  convGEN_REWRITE id [defFCONS]
        convL :: BoolCtxt thry => HOLTerm -> Conversion cls thry
        convL betm = convREWR (primASSUME betm)
        ruleSIMPER :: IndTypesPreCtxt thry => [HOLThm] -> HOLThm -> HOL cls thry HOLThm
        ruleSIMPER fnths th =
            do ths1 <- mapM ruleSYM fnths
               let ths2 = map fst tybijpairs
               ths3 <- sequence [ thmFST, thmSND, thmBETA, defFCONS ]
               rulePURE_REWRITE (ths1++ths2++ths3) th
        -- Normalise one conjunct of the raw recursion theorem:
        -- beta-reduce through four levels and simplify.
        hackdownRath :: IndTypesPreCtxt thry => HOLTerm -> [HOLThm] -> HOLThm
                     -> HOL cls thry HOLThm
        hackdownRath betm fnths th =
            do (ltm, rtm) <- destEq $ concl th
               (_, wargs) <- stripComb `fmap` rand ltm
               th0 <- runConv (convL betm) rtm
               th1 <- primTRANS th th0
               th1' <- runConv convC =<< rand (concl th1)
               th2 <- primTRANS th1 th1'
               th2' <- runConv (funpow 2 convRATOR convBETA) =<<
                         rand (concl th2)
               th3 <- primTRANS th2 th2'
               th3' <- runConv (convRATOR convBETA) =<< rand (concl th3)
               th4 <- primTRANS th3 th3'
               th4' <- runConv convBETA =<< rand (concl th4)
               th5 <- primTRANS th4 th4'
               ruleGENL wargs $ ruleSIMPER fnths th5
-- | Parse a textual inductive type specification of the form
-- @ty = Con1 ty ... | Con2 ty ... ; ty2 = ...@ into a list of
-- (type variable, constructor specs) pairs, elaborating the parsed
-- pre-types with the supplied 'ParseContext'.
parseInductiveTypeSpecification :: MonadThrow m => ParseContext -> Text
                                -> m [(HOLType, [(Text, [HOLType])])]
parseInductiveTypeSpecification ctxt s =
    mapM toTys =<< runHOLParser parser ctxt s
  where parser :: MyParser [(Text, [(Text, [PreType])])]
        -- Semicolon-separated list of type equations.
        parser = mywhiteSpace >> mysemiSep1 typeParser
        typeParser :: MyParser (Text, [(Text, [PreType])])
        -- @name = alt1 | alt2 | ...@
        typeParser = do x <- myidentifier
                        myreservedOp "="
                        ptys <- subtypeParser `mysepBy1` myreservedOp "|"
                        return (x, ptys)
        subtypeParser :: MyParser (Text, [PreType])
        -- A constructor name followed by its argument types.
        subtypeParser = do x <- myidentifier
                           ptys <- mymany ptype
                           return (x, ptys)
        toTys :: MonadThrow m => (Text, [(Text, [PreType])]) ->
                 m (HOLType, [(Text, [HOLType])])
        -- Elaborate pre-types to HOL types; the defined type becomes a
        -- type variable at this stage.
        toTys (s', ptys) =
            let ty = mkVarType s' in
              do tys <- mapM (\ (x, y) -> do y' <- mapM (tyElab ctxt) y
                                             return (x, y')) ptys
                 return (ty, tys)
-- | Basic version of @defineTypeRaw@: justifies the inductive type
-- model, proves inhabitation, defines the type(s) and constructors, and
-- returns the induction and recursion theorems separately.  The
-- specification parser is not used here.
defineTypeRaw :: IndTypesPreCtxt thry
              => [(HOLType, [(Text, [HOLType])])]
              -> HOL Theory thry (HOLThm, HOLThm)
defineTypeRaw def =
    do (defs, rth, ith) <- justifyInductiveTypeModel def
       neths <- proveModelInhabitation rth
       tybijpairs <- mapM (defineInductiveType defs) neths
       preds <- mapM (repeatM rator . concl) neths
       -- mk\/dest functions extracted from each type bijection theorem.
       mkdests <- mapM (\ (th, _) -> do tm <- lHand $ concl th
                                        tm' <- rand tm
                                        pairMapM rator (tm, tm')) tybijpairs
       let consindex = zip preds mkdests
       condefs <- mapM (defineInductiveTypeConstructor defs consindex) =<<
                    ruleCONJUNCTS rth
       conthms <- mapM (\ th@(Thm _ c) ->
                    do cs <- (fst . stripAbs) `fmap` rand c
                       ruleRIGHT_BETAS cs th) condefs
       iith <- instantiateInductionTheorem consindex ith
       fth <- deriveInductionTheorem consindex tybijpairs conthms iith rth
       rath <- createRecursiveFunctions tybijpairs consindex conthms rth
       kth <- deriveRecursionTheorem tybijpairs consindex conthms rath
       return (fth, kth)
| ecaustin/haskhol-math | src/HaskHOL/Lib/IndTypes/Pre.hs | bsd-2-clause | 27,255 | 1 | 23 | 10,208 | 9,471 | 4,635 | 4,836 | -1 | -1 |
-----------------------------------------------------------------------------
-- |
-- Module : Haddock.Backends.Html.DocMarkup
-- Copyright : (c) Simon Marlow 2003-2006,
-- David Waern 2006-2009,
-- Mark Lentczner 2010
-- License : BSD-like
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
-----------------------------------------------------------------------------
module Haddock.Backends.Xhtml.DocMarkup (
docToHtml,
rdrDocToHtml,
origDocToHtml,
docToHtmlNoAnchors,
docElement, docSection, docSection_,
) where
import Data.List
import Haddock.Backends.Xhtml.Names
import Haddock.Backends.Xhtml.Utils
import Haddock.Types
import Haddock.Utils
import Haddock.Doc (combineDocumentation, emptyMetaDoc,
metaDocAppend, metaConcat)
import Text.XHtml hiding ( name, p, quote )
import Data.Maybe (fromMaybe)
import GHC
import Name
-- | The 'DocMarkup' dictionary rendering Haddock documentation to XHTML.
-- 'insertAnchors' controls whether hyperlinks get anchor elements (the
-- Contents box disables them); 'ppId' renders identifiers.
parHtmlMarkup :: Qualification -> Bool
              -> (Bool -> a -> Html) -> DocMarkup a Html
parHtmlMarkup qual insertAnchors ppId = Markup {
  markupEmpty = noHtml,
  markupString = toHtml,
  markupParagraph = paragraph,
  markupAppend = (+++),
  markupIdentifier = thecode . ppId insertAnchors,
  markupIdentifierUnchecked = thecode . ppUncheckedLink qual,
  markupModule = \m -> let (mdl,ref) = break (=='#') m
                           -- Accommodate old style
                           -- foo\#bar anchors
                           mdl' = case reverse mdl of
                             '\\':_ -> init mdl
                             _ -> mdl
                       in ppModuleRef (mkModuleName mdl') ref,
  markupWarning = thediv ! [theclass "warning"],
  markupEmphasis = emphasize,
  markupBold = strong,
  markupMonospaced = thecode,
  markupUnorderedList = unordList,
  markupOrderedList = ordList,
  markupDefList = defList,
  markupCodeBlock = pre,
  markupHyperlink = \(Hyperlink url mLabel)
                    -> if insertAnchors
                       then anchor ! [href url]
                            << fromMaybe url mLabel
                       else toHtml $ fromMaybe url mLabel,
  markupAName = \aname -> namedAnchor aname << "",
  markupPic = \(Picture uri t) -> image ! ([src uri] ++ fromMaybe [] (return . title <$> t)),
  markupProperty = pre . toHtml,
  markupExample = examplesToHtml,
  markupHeader = \(Header l t) -> makeHeader l t
  }
  where
    -- Map a header level 1..6 to the matching h1..h6 element; anything
    -- else indicates a parser invariant violation upstream.
    makeHeader :: Int -> Html -> Html
    makeHeader 1 mkup = h1 mkup
    makeHeader 2 mkup = h2 mkup
    makeHeader 3 mkup = h3 mkup
    makeHeader 4 mkup = h4 mkup
    makeHeader 5 mkup = h5 mkup
    makeHeader 6 mkup = h6 mkup
    makeHeader l _ = error $ "Somehow got a header level `" ++ show l ++ "' in DocMarkup!"
    examplesToHtml l = pre (concatHtml $ map exampleToHtml l) ! [theclass "screen"]
    -- One REPL example: prompt, the typed expression, then its output.
    exampleToHtml (Example expression result) = htmlExample
      where
        htmlExample = htmlPrompt +++ htmlExpression +++ toHtml (unlines result)
        htmlPrompt = (thecode . toHtml $ ">>> ") ! [theclass "prompt"]
        htmlExpression = (strong . thecode . toHtml $ expression ++ "\n") ! [theclass "userinput"]
-- | We use this intermediate type to transform the input 'Doc' tree
-- in an arbitrary way before rendering, such as grouping some
-- elements. This is effectively a hack to prevent the 'Doc' type
-- from changing if it is possible to recover the layout information
-- we won't need after the fact.
data Hack a id =
  UntouchedDoc (MetaDoc a id)
  -- ^ Passed through unchanged.
  | CollapsingHeader (Header (DocH a id)) (MetaDoc a id) Int (Maybe String)
  -- ^ A header plus the docs grouped under it, a counter for a unique
  -- anchor id, and an optional per-comment disambiguator.
  | HackAppend (Hack a id) (Hack a id)
  deriving Eq
-- | Group things under bold 'DocHeader's together.
toHack :: Int -- ^ Counter for header IDs which serves to assign
              -- unique identifiers within the comment scope
       -> Maybe String
       -- ^ It is not enough to have unique identifier within the
       -- scope of the comment: if two different comments have the
       -- same ID for headers, the collapse/expand behaviour will act
       -- on them both. This serves to make each header a little bit
       -- more unique. As we can't export things with the same names,
       -- this should work more or less fine: it is in fact the
       -- implicit assumption the collapse/expand mechanism makes for
       -- things like ‘Instances’ boxes.
       -> [MetaDoc a id] -> Hack a id
toHack _ _ [] = UntouchedDoc emptyMetaDoc
toHack _ _ [x] = UntouchedDoc x
toHack n nm (MetaDoc { _doc = DocHeader (Header l (DocBold x)) }:xs) =
  let -- Header with dropped bold
      h = Header l x
      -- Everything including ‘smaller’ (deeper) headers stays under
      -- this header; the first header of equal or higher rank ends it.
      under (MetaDoc { _doc = DocHeader (Header l' _) }) = l' > l
      under _ = True
      -- Single pass: 'r' is the content scoped under this header, 'r''
      -- is everything after it. (span replaces the previous
      -- takeWhile/drop (length …) double traversal.)
      (r, r') = span under xs
      app y [] = y
      app y ys = HackAppend y (toHack (n + 1) nm ys)
  in case r of
       -- No content under this header
       [] -> CollapsingHeader h emptyMetaDoc n nm `app` r'
       -- We got something out, stitch it back together into one chunk;
       -- strict fold avoids a thunk chain over long sections.
       y:ys -> CollapsingHeader h (foldl' metaDocAppend y ys) n nm `app` r'
toHack n nm (x:xs) = HackAppend (UntouchedDoc x) (toHack n nm xs)
-- | Turn the ‘top-level’ 'DocAppend' tree into a flat list, keeping
-- the left-to-right order a reader would see.  'toHack' processes the
-- result in that order.
flatten :: MetaDoc a id -> [MetaDoc a id]
flatten md = case md of
  MetaDoc { _meta = m, _doc = DocAppend l r } ->
    let rewrap d = MetaDoc { _meta = m, _doc = d }
    in flatten (rewrap l) ++ flatten (rewrap r)
  other -> [other]
-- | Generate the markup needed for collapse to happen. For
-- 'UntouchedDoc' and 'HackAppend' we do nothing more but
-- extract/append the underlying 'Doc' and convert it to 'Html'. For
-- 'CollapsingHeader', we attach extra info to the generated 'Html'
-- that allows us to expand/collapse the content.
hackMarkup :: DocMarkup id Html -> Hack (ModuleName, OccName) id -> Html
hackMarkup fmt' h' =
  let (html, ms) = hackMarkup' fmt' h'
  in html +++ renderMeta fmt' (metaConcat ms)
  where
    -- Render the tree, collecting every 'Meta' so the combined meta
    -- information can be rendered once at the end.
    hackMarkup' :: DocMarkup id Html -> Hack (ModuleName, OccName) id
                -> (Html, [Meta])
    hackMarkup' fmt h = case h of
      UntouchedDoc d -> (markup fmt $ _doc d, [_meta d])
      CollapsingHeader (Header lvl titl) par n nm ->
        -- Anchor id combining the per-comment disambiguator and counter.
        let id_ = makeAnchorId $ "ch:" ++ fromMaybe "noid:" nm ++ show n
            col' = collapseControl id_ True "caption"
            instTable = (thediv ! collapseSection id_ False [] <<)
            lvs = zip [1 .. ] [h1, h2, h3, h4, h5, h6]
            -- Fall back to 'caption' for out-of-range header levels.
            getHeader = fromMaybe caption (lookup lvl lvs)
            subCaption = getHeader ! col' << markup fmt titl
        in ((subCaption +++) . instTable $ markup fmt (_doc par), [_meta par])
      HackAppend d d' -> let (x, m) = hackMarkup' fmt d
                             (y, m') = hackMarkup' fmt d'
                         in (markupAppend fmt x y, m ++ m')
-- | Render a comment's meta information (currently just the @\@since@
-- version, when present) as an emphasised trailing paragraph.
renderMeta :: DocMarkup id Html -> Meta -> Html
renderMeta fmt (Meta { _version = Just x }) =
  markupParagraph fmt . markupEmphasis fmt . toHtml $
  "Since: " ++ formatVersion x
  where
    -- Join version components with dots, e.g. [4,7,0] -> "4.7.0".
    formatVersion v = intercalate "." $ map show v
renderMeta _ _ = noHtml
-- | Render documentation via 'hackMarkup' instead of going straight to
-- 'markup': the intermediate 'Hack' pass lets us apply XHtml-specific
-- transformations (header grouping) to the tree first.
markupHacked :: DocMarkup id Html
             -> Maybe String
             -> MDoc id
             -> Html
markupHacked fmt n mdoc = hackMarkup fmt (toHack 0 n (flatten mdoc))
-- If the doc is a single paragraph, don't surround it with <P> (this causes
-- ugly extra whitespace with some browsers). FIXME: Does this still apply?
-- | Render documentation to XHTML with anchors inserted in links.
docToHtml :: Maybe String -- ^ Name of the thing this doc is for. See
                          -- comments on 'toHack' for details.
          -> Qualification -> MDoc DocName -> Html
docToHtml n qual = markupHacked fmt n . cleanup
  where fmt = parHtmlMarkup qual True (ppDocName qual Raw)
-- | Same as 'docToHtml' but it doesn't insert the 'anchor' element
-- in links. This is used to generate the Contents box elements.
docToHtmlNoAnchors :: Maybe String -- ^ See 'toHack'
                   -> Qualification -> MDoc DocName -> Html
docToHtmlNoAnchors n qual = markupHacked fmt n . cleanup
  where fmt = parHtmlMarkup qual False (ppDocName qual Raw)
-- | Render documentation whose identifiers are original 'Name's
-- (no collapsing-header disambiguator is needed here).
origDocToHtml :: Qualification -> MDoc Name -> Html
origDocToHtml qual = markupHacked fmt Nothing . cleanup
  where fmt = parHtmlMarkup qual True (const $ ppName Raw)
-- | Render documentation whose identifiers are unresolved 'RdrName's.
rdrDocToHtml :: Qualification -> MDoc RdrName -> Html
rdrDocToHtml qual = markupHacked fmt Nothing . cleanup
  where fmt = parHtmlMarkup qual True (const ppRdrName)
-- | Wrap rendered documentation in the element @el@.  Empty docs get
-- the @doc empty@ class and a space placeholder so the stylesheet can
-- treat them specially; non-empty docs get the plain @doc@ class.
docElement :: (Html -> Html) -> Html -> Html
docElement el content_
  | isNoHtml content_ = el ! [theclass "doc empty"] << spaceHtml
  | otherwise = el ! [theclass "doc"] << content_
-- | Render the combined documentation of a declaration as a @div@
-- section, or nothing when there is no documentation at all.
docSection :: Maybe Name -- ^ Name of the thing this doc is for
           -> Qualification -> Documentation DocName -> Html
docSection n qual = maybe noHtml (docSection_ n qual) . combineDocumentation
-- | Like 'docSection' but for documentation that is known to exist.
docSection_ :: Maybe Name -- ^ Name of the thing this doc is for
            -> Qualification -> MDoc DocName -> Html
docSection_ n qual =
  (docElement thediv <<) . docToHtml (getOccString <$> n) qual
-- | Unwrap single-paragraph list items.  Surrounding a lone paragraph
-- with <P>..</P> adds too much whitespace in some browsers (e.g. IE),
-- while multiple paragraphs should keep their separation.  Code blocks
-- are left alone so their line breaks survive.
cleanup :: MDoc a -> MDoc a
cleanup = overDoc (markup dropListParagraphs)
  where
    dropListParagraphs :: DocMarkup a (Doc a)
    dropListParagraphs = idMarkup
      { markupUnorderedList = DocUnorderedList . map stripPara
      , markupOrderedList = DocOrderedList . map stripPara
      }
    -- Unwrap a lone paragraph; leave everything else untouched.
    stripPara :: Doc a -> Doc a
    stripPara (DocParagraph d) = d
    stripPara d = d
| JPMoresmau/haddock | haddock-api/src/Haddock/Backends/Xhtml/DocMarkup.hs | bsd-2-clause | 10,602 | 0 | 17 | 3,017 | 2,478 | 1,313 | 1,165 | 155 | 9 |
module Data.Text.Prettyprint.Doc.Render.Terminal.Internal {-# DEPRECATED "Use \"Prettyprinter.Render.Terminal.Internal\" instead." #-} (
module Prettyprinter.Render.Terminal.Internal
) where
import Prettyprinter.Render.Terminal.Internal
| quchen/prettyprinter | prettyprinter-ansi-terminal/src/Data/Text/Prettyprint/Doc/Render/Terminal/Internal.hs | bsd-2-clause | 244 | 0 | 5 | 22 | 29 | 22 | 7 | 3 | 0 |
module Application.DocManager.Command where
import Application.DocManager.ProgType
import Application.DocManager.Job
import Application.DocManager.Config
import Text.Parsec
import System.Environment
import System.FilePath
-- | Dispatch a parsed command-line command.
--
-- Reads the user's configuration from @$HOME\/.docmanager@, parses it
-- with 'docManagerConfigParser', and runs the job matching the command.
-- A parse failure is reported on stdout and nothing else happens.
commandLineProcess :: Docmanager -> IO ()
commandLineProcess c = do
  putStrLn "test called"  -- leftover debug trace; kept so output is unchanged
  homedir <- getEnv "HOME"  -- NOTE(review): throws if HOME is unset
  let dotdocmanager = homedir </> ".docmanager"
  configstr <- readFile dotdocmanager
  -- Collapsed the intermediate binding and the redundant nested 'do'.
  case parse docManagerConfigParser "" configstr of
    Left err -> print err  -- idiomatic form of 'putStrLn (show err)'
    Right dmc -> case c of
      Test -> startJob dmc
      Individual fname -> startIndividualJob dmc fname
| wavewave/docmanager | lib/Application/DocManager/Command.hs | bsd-2-clause | 686 | 0 | 15 | 133 | 181 | 88 | 93 | 20 | 3 |
module Test.Day17 where
import Day17
import Test.Tasty
import Test.Tasty.HUnit
-- | All tests for Advent of Code 2015, day 17.
day17 :: TestTree
day17 = testGroup "No Such Thing as Too Much" [part1, part2]
-- | Part 1: count every container combination holding the target.
part1 :: TestTree
part1 = testGroup "Part 1" [p1Tests, p1Puzzle]
-- | Container sizes from the worked example in the puzzle text.
containers :: [Int]
containers = [20, 15, 10, 5, 5]
-- | Worked example: 25 litres fit the example containers in 4 ways.
p1Tests :: TestTree
p1Tests = testGroup "Test Cases" [
    testCase "Example 1" $ length (possibilities 25 containers) @?= 4
  ]
-- | Part 1 against the real puzzle input: 150 litres across the parsed
-- container sizes should admit exactly 654 combinations.
p1Puzzle :: TestTree
p1Puzzle = testCaseSteps "Puzzle" $ \_ -> do
  -- 'traverse' replaces the 'sequence . map' pattern; the whole parse
  -- fails if any single line fails.
  sizes <- traverse parse . lines <$> readFile "input/day17.txt"
  -- '@?=' is actual @?= expected; the operands were previously swapped,
  -- which inverted the failure message.
  fmap (length . possibilities 150) sizes @?= Right 654
-- | Part 2: only combinations using the minimum number of containers.
part2 :: TestTree
part2 = testGroup "Part 2" [p2Tests, p2Puzzle]
-- | Worked example: 25 litres, minimal container count, 3 ways.
p2Tests :: TestTree
p2Tests = testGroup "Test Cases" [
    testCase "Example 2" $ length (possibilitiesMin 25 containers) @?= 3
  ]
-- | Part 2 against the real puzzle input: 57 minimal-count combinations.
p2Puzzle :: TestTree
p2Puzzle = testCaseSteps "Puzzle" $ \_ -> do
  -- Same parsing shape as 'p1Puzzle': 'traverse' over the input lines.
  sizes <- traverse parse . lines <$> readFile "input/day17.txt"
  -- actual @?= expected (operands were swapped before).
  fmap (length . possibilitiesMin 150) sizes @?= Right 57
| taylor1791/adventofcode | 2015/test/Test/Day17.hs | bsd-2-clause | 1,008 | 2 | 14 | 186 | 347 | 181 | 166 | 26 | 1 |
{-# LANGUAGE OverloadedStrings #-}
module FortuneIndexer (
getTerm
) where
import Control.Monad (forM_)
import Data.List.Split (splitOn)
import Data.Char (isLetter, isSpace)
import Data.Text.Encoding as E
import qualified Data.Map as M
import qualified Data.Text as T
import qualified Data.Text.IO as T
import Database.Redis.Redis (Redis,Reply,connect,set,zincrBy)
import Database.Redis.ByteStringClass (BS,toBS,fromBS)
import Porter (stem)
import StopWords (isStopWord)
-- Lets the redis client take 'T.Text' keys/values directly, encoded as
-- UTF-8 on the wire.  NOTE(review): this is an orphan instance --
-- acceptable in an executable, risky if this module became a library.
instance BS T.Text where
    toBS = encodeUtf8
    fromBS = decodeUtf8
-- | Term -> occurrence-count map for a single fortune.
type WordWeights = M.Map T.Text Int
-- | Split a fortune file whose entries are separated by @%@ markers.
splitOnPercent :: T.Text -> [T.Text]
splitOnPercent = T.splitOn "%"
-- | Split a fortune file whose entries are separated by "blank" lines
-- (in these files the separator line contains a single space).
-- 'splitOn' always yields at least one chunk and the files end with a
-- trailing separator, so the final chunk is dropped.  The explicit case
-- makes the previously partial 'init' visibly total: a pathological
-- empty split now yields no fortunes instead of crashing.
splitOnBlankLines :: T.Text -> [T.Text]
splitOnBlankLines t =
    case splitOn [" "] (T.lines t) of
      [] -> []
      chunks -> map T.concat (init chunks)
-- | Fortune files to index, each paired with the splitter matching its
-- on-disk format (blank-line separated vs. %-separated).
fortunePaths :: [(FilePath,T.Text -> [T.Text])]
fortunePaths = [ ("./fortune/CatV.fortune", splitOnBlankLines)
               , ("./fortune/FreeBsd.fortune", splitOnPercent)
               , ("./fortune/KernelNewbies.fortune", splitOnPercent)]
-- | Index one fortune file: store each fortune under the key
-- @path ++ index@ and record its term weights in the per-term index.
indexFortune :: Redis -> (FilePath,T.Text -> [T.Text]) -> IO ()
indexFortune redis (path,sep) = do
  fortunesText <- T.readFile path
  let fortunes = sep fortunesText
  -- Single pass pairing each fortune with its term counts and 1-based
  -- key suffix; the original zipped [1..] twice in two separate loops.
  forM_ (zip3 fortunes (map getTerms fortunes) [(1 :: Int) ..]) $
    \(fortune, terms, n) -> do
      let key = path ++ show n
      _ <- set redis key fortune
      addTerms redis key terms
-- | Index every configured fortune file into Redis.
indexFortunes :: Redis -> IO ()
indexFortunes conn = mapM_ (indexFortune conn) fortunePaths
-- | Bump the score of document @ref@ by @v@ in the sorted set keyed by
-- term @k@ (Redis ZINCRBY).
storeTermEntry :: Redis -> T.Text -> Int -> FilePath -> IO (Reply T.Text)
storeTermEntry r k v ref = zincrBy r k (fromIntegral v) (T.pack ref)
-- | Record every (term, weight) pair of one document in the index.
addTerms :: Redis -> String -> WordWeights -> IO ()
addTerms r ref wordWeights = mapM_ (\(k,v) -> storeTermEntry r k v ref) (M.toList wordWeights)
-- | Count stemmed, lower-cased, non-stop-word terms in a text.
-- 'fromListWith' replaces a manual fold over the long-deprecated
-- 'M.insertWith''; the resulting counts are identical.
getTerms :: T.Text -> WordWeights
getTerms ws = M.fromListWith (+) [ (getTerm w, 1) | w <- keptWords ]
  where
    -- Stop words are dropped before stemming.
    keptWords = filter (not . isStopWord) $ (T.words . T.toLower) ws
-- | Normalise one word into an index term: lower-case, keep letters
-- only, then Porter-stem.  (Callers such as 'getTerms' already
-- lower-case their input; the extra 'T.toLower' keeps this safe on raw
-- words too.)
getTerm :: T.Text -> T.Text
getTerm = stem . T.filter isLetter . T.toLower
-- | Connect to a local Redis on the default port and index all
-- configured fortune files.
main :: IO ()
main = connect "localhost" "6379" >>= indexFortunes
| fffej/Keyword-Search | FortuneIndexer.hs | bsd-2-clause | 2,165 | 0 | 13 | 393 | 822 | 448 | 374 | 46 | 1 |
-----------------------------------------------------------------------------
-- |
-- Module : Application.HXournal.Coroutine.Eraser
-- Copyright : (c) 2011, 2012 Ian-Woo Kim
--
-- License : BSD3
-- Maintainer : Ian-Woo Kim <[email protected]>
-- Stability : experimental
-- Portability : GHC
--
-----------------------------------------------------------------------------
module Application.HXournal.Coroutine.Eraser where
import Graphics.UI.Gtk hiding (get,set,disconnect)
import Application.HXournal.Type.Event
import Application.HXournal.Type.Coroutine
import Application.HXournal.Type.Canvas
import Application.HXournal.Type.XournalState
import Application.HXournal.Type.PageArrangement
import Application.HXournal.Device
-- import Application.HXournal.View.Draw
import Application.HXournal.View.Coordinate
import Application.HXournal.Coroutine.EventConnect
import Application.HXournal.Coroutine.Draw
import Application.HXournal.Coroutine.Commit
import Application.HXournal.Accessor
import Application.HXournal.ModelAction.Page
import Application.HXournal.ModelAction.Eraser
import Application.HXournal.ModelAction.Layer
import Data.Xournal.Generic
import Data.Xournal.BBox
import Graphics.Xournal.Render.HitTest
import Graphics.Xournal.Render.BBoxMapPDF
import Control.Monad.Coroutine.SuspensionFunctors
import Control.Monad.Trans
import qualified Control.Monad.State as St
import Control.Category
import Data.Label
import qualified Data.IntMap as IM
import Prelude hiding ((.), id)
import Application.HXournal.Coroutine.Pen
-- | Begin an eraser interaction on canvas 'cid': performs the common
-- pen-start bookkeeping, snapshots the strokes of the current layer,
-- and hands control to 'eraserProcess'.
eraserStart :: CanvasId
               -> PointerCoord
               -> MainCoroutine ()
eraserStart cid = commonPenStart eraserAction cid
  where eraserAction _cinfo pnum geometry (cidup,cidmove) (x,y) = do
          strs <- getAllStrokeBBoxInCurrentLayer
          eraserProcess cid pnum geometry cidup cidmove strs (x,y)
-- | Eraser event loop: waits for the next pointer event, erases every
-- stroke hit by the segment from the previous position to the current
-- one, commits the new document state, and recurses until pen-up
-- (which disconnects both event handlers and redraws).
-- NOTE(review): the connect-ids appear swapped between the recursive
-- calls ('connidup connidmove' vs 'connidmove connidup'); harmless as
-- both are disconnected together on pen-up, but worth confirming.
eraserProcess :: CanvasId
              -> PageNum
              -> CanvasGeometry
              -> ConnectId DrawingArea -> ConnectId DrawingArea
              -> [StrokeBBox]
              -> (Double,Double)
              -> MainCoroutine ()
eraserProcess cid pnum geometry connidmove connidup strs (x0,y0) = do
    r <- await
    xst <- getSt
    boxAction (f r xst) . getCanvasInfo cid $ xst
  where
    f :: (ViewMode a) => MyEvent -> HXournalState -> CanvasInfo a -> MainCoroutine ()
    f r xstate cvsInfo = penMoveAndUpOnly r pnum geometry defact
                           (moveact xstate cvsInfo) upact
    defact = eraserProcess cid pnum geometry connidup connidmove strs (x0,y0)
    upact _ = disconnect connidmove >> disconnect connidup >> invalidateAll
    moveact xstate cvsInfo (_pcoord,(x,y)) = do
      -- Hit test the movement segment against the candidate strokes.
      let line = ((x0,y0),(x,y))
          hittestbbox = mkHitTestBBox line strs
          (hitteststroke,hitState) =
            St.runState (hitTestStrokes line hittestbbox) False
      if hitState
        then do
          page <- getCurrentPageCvsId cid
          let currxoj = unView . get xournalstate $ xstate
              pgnum = get currentPageNum cvsInfo
              (mcurrlayer, currpage) = getCurrentLayerOrSet page
              currlayer = maybe (error "eraserProcess") id mcurrlayer
          -- Remove the hit strokes; the affected bbox (inflated a bit)
          -- limits the layer buffer redraw.
          let (newstrokes,maybebbox1) = St.runState (eraseHitted hitteststroke) Nothing
              maybebbox = fmap (flip inflate 2.0) maybebbox1
          newlayerbbox <- liftIO . updateLayerBuf maybebbox
                          . set g_bstrokes newstrokes $ currlayer
          let newpagebbox = adjustCurrentLayer newlayerbbox currpage
              newxojbbox = modify g_pages (IM.adjust (const newpagebbox) pgnum) currxoj
              newxojstate = ViewAppendState newxojbbox
          -- Commit for undo history, then continue with the fresh
          -- stroke list of the updated layer.
          commit . set xournalstate newxojstate
            =<< (liftIO (updatePageAll newxojstate xstate))
          invalidateWithBuf cid
          newstrs <- getAllStrokeBBoxInCurrentLayer
          eraserProcess cid pnum geometry connidup connidmove newstrs (x,y)
        else eraserProcess cid pnum geometry connidmove connidup strs (x,y)
| wavewave/hxournal | lib/Application/HXournal/Coroutine/Eraser.hs | bsd-2-clause | 4,097 | 0 | 19 | 922 | 934 | 515 | 419 | 76 | 2 |
{-# LANGUAGE RecordWildCards #-}
module Main where
import Control.Applicative
import Control.Exception
import Data.Aeson
import Data.Aeson.Diff
import qualified Data.ByteString.Char8 as BS
import qualified Data.ByteString.Lazy as BSL
import Data.Monoid
import Options.Applicative hiding (Success)
import Options.Applicative.Types hiding (Success)
import System.IO
-- | A file argument; 'Nothing' means stdin\/stdout ("-" on the command
-- line, see 'optionParser').
type File = Maybe FilePath
-- | Command-line options.
data PatchOptions = PatchOptions
    { optionOut :: File -- ^ JSON destination
    , optionPatch :: File -- ^ Patch input
    , optionFrom :: File -- ^ JSON source
    }
-- | 'PatchOptions' resolved to open handles.
data Configuration = Configuration
    { cfgOut :: Handle -- ^ destination for the patched JSON
    , cfgPatch :: Handle -- ^ patch document
    , cfgFrom :: Handle -- ^ document to patch
    }
-- | Command-line parser: @-o\/--output@ plus two positional arguments,
-- the patch file and the document to patch.
optionParser :: Parser PatchOptions
optionParser = PatchOptions
    <$> option fileP
        ( long "output"
       <> short 'o'
       <> metavar "OUTPUT"
       <> help "Destination for patched JSON."
       <> value Nothing
        )
    <*> argument fileP
        ( metavar "PATCH"
       <> help "Patch to apply."
        )
    <*> argument fileP
        ( metavar "FROM"
       <> help "JSON file to patch."
        )
  where
    -- "-" selects stdin/stdout; anything else is a file path.
    fileP = do
        s <- readerAsk
        return $ case s of
            "-" -> Nothing
            _ -> Just s
-- | Read a handle to EOF and decode its contents as JSON.  Dies with
-- the parser's diagnostic when the input is not valid JSON (the old
-- 'decode' path discarded the reason).
jsonRead :: Handle -> IO Value
jsonRead fp = do
    -- Strict read: I/O errors surface here rather than lazily later.
    s <- BS.hGetContents fp
    case eitherDecode (BSL.fromStrict s) of
        Left err -> error ("Could not parse as JSON: " ++ err)
        Right v -> return v
-- | Open the configured handles, apply the patch, and always close the
-- handles afterwards ('bracket' guarantees 'close' once 'load' has
-- returned).  NOTE(review): if a later open inside 'load' fails, the
-- handles opened before it leak, since bracket is not yet armed.
run :: PatchOptions -> IO ()
run opt = bracket (load opt) close process
  where
    openr :: Maybe FilePath -> IO Handle
    openr Nothing = return stdin
    openr (Just p) = openFile p ReadMode
    openw :: Maybe FilePath -> IO Handle
    openw Nothing = return stdout
    openw (Just p) = openFile p WriteMode
    -- Resolve the three File options to live handles.
    load :: PatchOptions -> IO Configuration
    load PatchOptions{..} =
        Configuration
            <$> openw optionOut
            <*> openr optionPatch
            <*> openr optionFrom
    close :: Configuration -> IO ()
    close Configuration{..} = do
        hClose cfgPatch
        hClose cfgFrom
        hClose cfgOut
    -- Decode both inputs, apply the patch, emit the result.
    -- A malformed patch or a failed application aborts via 'error'.
    process :: Configuration -> IO ()
    process Configuration{..} = do
        json_patch <- jsonRead cfgPatch
        json_from <- jsonRead cfgFrom
        case fromJSON json_patch >>= flip patch json_from of
            Error e -> error e
            Success d -> BS.hPutStrLn cfgOut $ BSL.toStrict (encode d)
-- | Parse the command line and run the patch application.
main :: IO ()
main = execParser opts >>= run
  where
    -- The description previously read "Generate a patch between two
    -- JSON documents" -- copied from the diff tool; this executable
    -- applies a patch.
    opts = info (helper <*> optionParser)
        ( fullDesc
        <> progDesc "Apply a patch to a JSON document.")
#!/usr/local/bin/runhaskell
{-# LANGUAGE DeriveDataTypeable #-}
import Text.Hastache
import Text.Hastache.Context
import qualified Data.Text.Lazy.IO as TL
import Data.Data
import Data.Generics
-- Render the mustache template against the generic context and print it.
main = hastacheStr defaultConfig (encodeStr template) context
    >>= TL.putStrLn
-- begin example
-- Records rendered via the generic (Data/Typeable) context.
data Hero = Hero { name :: String } deriving (Data, Typeable)
data Heroes = Heroes { heroes :: [Hero] } deriving (Data, Typeable)
-- Mustache section over the 'heroes' list; {{name}} resolves per hero.
template = concat [
    "{{#heroes}}\n",
    "* {{name}} \n",
    "{{/heroes}}\n"]
-- Context derived generically from the record field names.
context = mkGenericContext $ Heroes $ map Hero ["Nameless","Long Sky",
    "Flying Snow","Broken Sword","Qin Shi Huang"]
{-# LANGUAGE Trustworthy #-}
{-# LANGUAGE NoImplicitPrelude #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE PolyKinds, DataKinds, TypeFamilies, TypeOperators, UndecidableInstances #-}
-----------------------------------------------------------------------------
-- |
-- Module : Data.Either
-- Copyright : (c) The University of Glasgow 2001
-- License : BSD-style (see the file libraries/base/LICENSE)
--
-- Maintainer : [email protected]
-- Stability : experimental
-- Portability : portable
--
-- The Either type, and associated operations.
--
-----------------------------------------------------------------------------
module Data.Either (
Either(..),
either,
lefts,
rights,
isLeft,
isRight,
partitionEithers,
) where
import GHC.Base
import GHC.Show
import GHC.Read
import Data.Type.Equality
-- $setup
-- Allow the use of some Prelude functions in doctests.
-- >>> import Prelude ( (+), (*), length, putStrLn )
{-
-- just for testing
import Test.QuickCheck
-}
{-|
The 'Either' type represents values with two possibilities: a value of
type @'Either' a b@ is either @'Left' a@ or @'Right' b@.
The 'Either' type is sometimes used to represent a value which is
either correct or an error; by convention, the 'Left' constructor is
used to hold an error value and the 'Right' constructor is used to
hold a correct value (mnemonic: \"right\" also means \"correct\").
==== __Examples__
The type @'Either' 'String' 'Int'@ is the type of values which can be either
a 'String' or an 'Int'. The 'Left' constructor can be used only on
'String's, and the 'Right' constructor can be used only on 'Int's:
>>> let s = Left "foo" :: Either String Int
>>> s
Left "foo"
>>> let n = Right 3 :: Either String Int
>>> n
Right 3
>>> :type s
s :: Either String Int
>>> :type n
n :: Either String Int
The 'fmap' from our 'Functor' instance will ignore 'Left' values, but
will apply the supplied function to values contained in a 'Right':
>>> let s = Left "foo" :: Either String Int
>>> let n = Right 3 :: Either String Int
>>> fmap (*2) s
Left "foo"
>>> fmap (*2) n
Right 6
The 'Monad' instance for 'Either' allows us to chain together multiple
actions which may fail, and fail overall if any of the individual
steps failed. First we'll write a function that can either parse an
'Int' from a 'Char', or fail.
>>> import Data.Char ( digitToInt, isDigit )
>>> :{
let parseEither :: Char -> Either String Int
parseEither c
| isDigit c = Right (digitToInt c)
| otherwise = Left "parse error"
>>> :}
The following should work, since both @\'1\'@ and @\'2\'@ can be
parsed as 'Int's.
>>> :{
let parseMultiple :: Either String Int
parseMultiple = do
x <- parseEither '1'
y <- parseEither '2'
return (x + y)
>>> :}
>>> parseMultiple
Right 3
But the following should fail overall, since the first operation where
we attempt to parse @\'m\'@ as an 'Int' will fail:
>>> :{
let parseMultiple :: Either String Int
parseMultiple = do
x <- parseEither 'm'
y <- parseEither '2'
return (x + y)
>>> :}
>>> parseMultiple
Left "parse error"
-}
data Either a b = Left a | Right b
  deriving (Eq, Ord, Read, Show)  -- derived Ord: any 'Left' < any 'Right'
-- | 'fmap' maps over the 'Right' value; 'Left' passes through unchanged.
instance Functor (Either a) where
    fmap _ (Left x) = Left x
    fmap f (Right y) = Right (f y)
-- | '<*>' short-circuits on the first 'Left'.
instance Applicative (Either e) where
    pure = Right
    Left e <*> _ = Left e
    Right f <*> r = fmap f r
-- | '>>=' short-circuits on 'Left', giving fail-fast sequencing.
instance Monad (Either e) where
    Left l >>= _ = Left l
    Right r >>= k = k r
-- | Case analysis for the 'Either' type.
-- If the value is @'Left' a@, apply the first function to @a@;
-- if it is @'Right' b@, apply the second function to @b@.
--
-- ==== __Examples__
--
-- >>> let s = Left "foo" :: Either String Int
-- >>> let n = Right 3 :: Either String Int
-- >>> either length (*2) s
-- 3
-- >>> either length (*2) n
-- 6
--
either :: (a -> c) -> (b -> c) -> Either a b -> c
either onLeft onRight e = case e of
    Left x -> onLeft x
    Right y -> onRight y
-- | Extracts from a list of 'Either' all the 'Left' elements.
-- All the 'Left' elements are extracted in order.
--
-- ==== __Examples__
--
-- >>> lefts [ Left "foo", Right 3, Left "bar", Right 7, Left "baz" ]
-- ["foo","bar","baz"]
--
lefts :: [Either a b] -> [a]
lefts = foldr keep []
  where
    keep (Left a) acc = a : acc
    keep (Right _) acc = acc
-- | Extracts from a list of 'Either' all the 'Right' elements.
-- All the 'Right' elements are extracted in order.
--
-- ==== __Examples__
--
-- >>> rights [ Left "foo", Right 3, Left "bar", Right 7, Left "baz" ]
-- [3,7]
--
rights :: [Either a b] -> [b]
rights = foldr keep []
  where
    keep (Right b) acc = b : acc
    keep (Left _) acc = acc
-- | Partitions a list of 'Either' into two lists: all the 'Left'
-- elements, in order, and all the 'Right' elements, in order.
-- Equivalent to @('lefts' xs, 'rights' xs)@ in a single pass.
--
-- ==== __Examples__
--
-- >>> partitionEithers [ Left "foo", Right 3, Left "bar", Right 7 ]
-- (["foo","bar"],[3,7])
--
partitionEithers :: [Either a b] -> ([a],[b])
partitionEithers = foldr step ([], [])
  where
    -- Lazy pair patterns keep the fold productive on long inputs.
    step (Left a) ~(ls, rs) = (a : ls, rs)
    step (Right b) ~(ls, rs) = (ls, b : rs)
-- | Return `True` if the given value is a `Left`-value, `False`
-- otherwise.
--
-- @since 4.7.0.0
--
-- ==== __Examples__
--
-- >>> isLeft (Left "foo")
-- True
-- >>> isLeft (Right 3)
-- False
--
-- Useful to avoid pattern matching when the contained value is
-- irrelevant, e.g. @when (isLeft e) reportError@.
isLeft :: Either a b -> Bool
isLeft e = case e of
    Left _ -> True
    Right _ -> False
-- | Return `True` if the given value is a `Right`-value, `False`
-- otherwise.
--
-- @since 4.7.0.0
--
-- ==== __Examples__
--
-- >>> isRight (Left "foo")
-- False
-- >>> isRight (Right 3)
-- True
--
-- Useful to avoid pattern matching when the contained value is
-- irrelevant, e.g. @when (isRight e) reportSuccess@.
isRight :: Either a b -> Bool
isRight e = case e of
    Left _ -> False
    Right _ -> True
-- instance for the == Boolean type-level equality operator
-- Two promoted 'Either' values are equal only when built from the same
-- constructor with type-equal payloads; mixed constructors are 'False.
type family EqEither a b where
  EqEither ('Left x) ('Left y) = x == y
  EqEither ('Right x) ('Right y) = x == y
  EqEither _a _b = 'False
type instance a == b = EqEither a b
{-
{--------------------------------------------------------------------
Testing
--------------------------------------------------------------------}
prop_partitionEithers :: [Either Int Int] -> Bool
prop_partitionEithers x =
partitionEithers x == (lefts x, rights x)
-}
| gridaphobe/ghc | libraries/base/Data/Either.hs | bsd-3-clause | 8,020 | 1 | 9 | 1,702 | 847 | 509 | 338 | 50 | 1 |
{-# LANGUAGE UndecidableInstances #-}
module Database.Esqueleto.Join where
import Data.Singletons.TH
import Database.Esqueleto
import Prelude
-- Template Haskell: generates 'MaybeCon' plus its singleton type 'SMaybeCon'
-- (constructors 'SPresent' / 'SAbsent') used to case on nullability below.
$(singletons [d|
  data MaybeCon = Present | Absent deriving (Eq, Show)
  |])
-- | Wrap @b@ in 'Maybe' iff the type-level flag is 'Present.
type family MaybeMaybe (a :: MaybeCon) (b :: *) :: * where
  MaybeMaybe 'Present b = Maybe b
  MaybeMaybe 'Absent b = b
-- | Fold a type-level list of entity types into left-nested 'InnerJoin's of
-- @SqlExpr (Entity _)@.
type family Joins (a :: [*]) :: * where
  Joins (a ': rest) = JoinsInternal rest (SqlExpr (Entity a))
-- Accumulator-style worker for 'Joins'.
type family JoinsInternal (a :: [*]) (b :: *) :: * where
  JoinsInternal '[a] acc = InnerJoin acc (SqlExpr (Entity a))
  JoinsInternal (a ': rest) acc = JoinsInternal rest (InnerJoin acc (SqlExpr (Entity a)))
-- | The pair of join columns for entities @a@ and @b@, each tagged with a
-- singleton recording whether that column is 'Maybe'-wrapped.
type PairSig a b c d =
  ( (SMaybeCon c, EntityField a (MaybeMaybe c (JoinKey a b)))
  , (SMaybeCon d, EntityField b (MaybeMaybe d (JoinKey a b)))
  )
-- | Relates two entity types to the columns they join on; the functional
-- dependencies make the nullability flags @c@ and @d@ determined by the pair.
class FieldPair a b c d | a b -> c, a b -> d where
  type JoinKey a b
  pair :: PairSig a b c d
-- | Deconstruct any of esqueleto's join constructors into its two operands.
class Split join where
  split :: a `join` b -> (a, b)
instance Split InnerJoin where
  split (a `InnerJoin` b) = (a, b)
instance Split LeftOuterJoin where
  split (a `LeftOuterJoin` b) = (a, b)
instance Split RightOuterJoin where
  split (a `RightOuterJoin` b) = (a, b)
instance Split FullOuterJoin where
  split (a `FullOuterJoin` b) = (a, b)
instance Split CrossJoin where
  split (a `CrossJoin` b) = (a, b)
-- | Split a doubly-nested join into the inner join, its right operand, and
-- the outermost right operand.
split3 :: (Split join2, Split join1) => a `join1` b `join2` c -> (a `join1` b, b, c)
split3 joined = (inner, middle, outer)
  where
    (inner, outer) = split joined
    (_, middle)    = split inner
-- | Emit the SQL @ON@ condition joining two row expressions.
class JoinPair a b where
  joinPair :: SqlExpr a -> SqlExpr b -> SqlQuery ()
-- Plain row / plain row: 'just' lifts whichever column is not Maybe-wrapped
-- so both sides of '==.' have the same type.
instance (FieldPair a b c d, PersistField (JoinKey a b), PersistEntity a, PersistEntity b) => JoinPair (Entity a) (Entity b) where
  joinPair a b =
      on condition
    where
      ((aMC, aField), (bMC, bField)) = pair :: PairSig a b c d
      condition =
        case (aMC, bMC) of
          (SAbsent, SAbsent) -> a ^. aField ==. b ^. bField
          (SPresent, SPresent) -> a ^. aField ==. b ^. bField
          (SPresent, SAbsent) -> a ^. aField ==. just (b ^. bField)
          (SAbsent, SPresent) -> just (a ^. aField) ==. b ^. bField
-- Nullable row / nullable row: '?.' already yields Maybe-typed values.
instance (FieldPair a b c d, PersistField (JoinKey a b), PersistEntity a, PersistEntity b) => JoinPair (Maybe (Entity a)) (Maybe (Entity b)) where
  joinPair a b =
      on condition
    where
      ((aMC, aField), (bMC, bField)) = pair :: PairSig a b c d
      condition =
        case (aMC, bMC) of
          (SAbsent, SAbsent) -> a ?. aField ==. b ?. bField
          (SPresent, SPresent) -> a ?. aField ==. b ?. bField
          (SPresent, SAbsent) -> a ?. aField ==. just (b ?. bField)
          (SAbsent, SPresent) -> just (a ?. aField) ==. b ?. bField
-- Plain row on the left, nullable row on the right.
instance (FieldPair a b c d, PersistField (JoinKey a b), PersistEntity a, PersistEntity b) => JoinPair (Entity a) (Maybe (Entity b)) where
  joinPair a b =
      on condition
    where
      ((aMC, aField), (bMC, bField)) = pair :: PairSig a b c d
      condition =
        case (aMC, bMC) of
          (SAbsent, SAbsent) -> just (a ^. aField) ==. b ?. bField
          (SPresent, SPresent) -> just (a ^. aField) ==. b ?. bField
          (SPresent, SAbsent) -> a ^. aField ==. b ?. bField
          (SAbsent, SPresent) -> just (just (a ^. aField)) ==. b ?. bField
-- Nullable row on the left, plain row on the right.
instance (FieldPair a b c d, PersistField (JoinKey a b), PersistEntity a, PersistEntity b) => JoinPair (Maybe (Entity a)) (Entity b) where
  joinPair a b =
      on condition
    where
      ((aMC, aField), (bMC, bField)) = pair :: PairSig a b c d
      condition =
        case (aMC, bMC) of
          (SAbsent, SAbsent) -> a ?. aField ==. just (b ^. bField)
          (SPresent, SPresent) -> a ?. aField ==. just (b ^. bField)
          (SPresent, SAbsent) -> a ?. aField ==. just (just (b ^. bField))
          (SAbsent, SPresent) -> a ?. aField ==. b ^. bField
-- | Walk a whole chain of joins, emitting an @ON@ clause for every adjacent
-- pair of tables.
class Join a where
  join :: a -> SqlQuery ()
-- Base case: a single binary join.
instance (JoinPair a b, Split join) => Join (SqlExpr a `join` SqlExpr b) where
  join ab = uncurry joinPair $ split ab
-- Inductive case: join the rightmost pair, then recurse into the prefix.
instance (Split join1, Split join2, JoinPair b c, Join (a `join1` SqlExpr b)) => Join (a `join1` SqlExpr b `join2` SqlExpr c) where
  join xs =
    joinPair l r *> join rest
    where
      (rest, l, r) = split3 xs
| pseudonom/dovetail | src/Database/Esqueleto/Join.hs | bsd-3-clause | 4,240 | 7 | 17 | 1,139 | 1,849 | 1,008 | 841 | -1 | -1 |
{-# LANGUAGE BangPatterns #-}
module Main where
import qualified Data.ByteString as B
import qualified Crypto.MAC.SipHash as SipHash
import Criterion.Main
-- Benchmark SipHash over a range of message sizes; bang patterns force the
-- inputs up front so allocation is not measured inside the benchmark loops.
main = do
  let !bs5   = B.pack [0 .. 4]
      !bs8   = B.pack [0 .. 7]
      !bs11  = B.pack [0 .. 10]
      !bs40  = B.pack [0 .. 39]
      !bs1Mb = B.pack (map fromIntegral [0 .. 999999 :: Int])
      !key   = SipHash.SipKey 0 0
      !hash  = SipHash.hash key
  defaultMain
    [ bgroup "Hash"
        [ bench "5"    (whnf hash bs5)
        , bench "8"    (whnf hash bs8)
        , bench "11"   (whnf hash bs11)
        , bench "40"   (whnf hash bs40)
        , bench "2^20" (whnf hash bs1Mb)
        ]
    ]
| vincenthz/hs-siphash | Benchs/Bench.hs | bsd-3-clause | 685 | 0 | 12 | 237 | 255 | 124 | 131 | 20 | 1 |
module Text.Highlighter.Lexers.Bash (lexer) where
import Text.Regex.PCRE.Light
import Text.Highlighter.Types
-- | Lexer definition for Bash shell scripts; matching is multiline and
-- starts in the 'root'' state.
lexer :: Lexer
lexer = Lexer
    { lName = "Bash"
    , lAliases = ["bash", "sh", "ksh"]
    , lExtensions = [".sh", ".ksh", ".bash", ".ebuild", ".eclass"]
    , lMimetypes = ["application/x-sh", "application/x-shellscript"]
    , lStart = root'
    , lFlags = [multiline]
    }
-- | State inside a @${...}@ parameter expansion; @}@ pops back out.
curly' :: TokenMatcher
curly' =
    [ tokNext "}" (Arbitrary "Keyword") Pop
    , tok ":-" (Arbitrary "Keyword")
    , tok "[a-zA-Z0-9_]+" (Arbitrary "Name" :. Arbitrary "Variable")
    , tok "[^}:\"\\'`$]+" (Arbitrary "Punctuation")
    , tok ":" (Arbitrary "Punctuation")
    , anyOf root'
    ]
-- | State inside a backtick command substitution; a closing backtick pops.
backticks' :: TokenMatcher
backticks' =
    [ tokNext "`" (Arbitrary "Literal" :. Arbitrary "String" :. Arbitrary "Backtick") Pop
    , anyOf root'
    ]
-- | Entry state: dispatches into arithmetic @$((@, command substitution
-- @$(@, parameter expansion @${@, and backtick states.
root' :: TokenMatcher
root' =
    [ anyOf basic'
    , tokNext "\\$\\(\\(" (Arbitrary "Keyword") (GoTo math')
    , tokNext "\\$\\(" (Arbitrary "Keyword") (GoTo paren')
    , tokNext "\\${#?" (Arbitrary "Keyword") (GoTo curly')
    , tokNext "`" (Arbitrary "Literal" :. Arbitrary "String" :. Arbitrary "Backtick") (GoTo backticks')
    , anyOf data'
    ]
-- | Core shell syntax: keywords, builtins, comments, escapes, assignments,
-- bracket/operator punctuation, heredocs, and logical operators.
basic' :: TokenMatcher
basic' =
    [ tok "\\b(if|fi|else|while|do|done|for|then|return|function|case|select|continue|until|esac|elif)\\s*\\b" (Arbitrary "Keyword")
    , tok "\\b(alias|bg|bind|break|builtin|caller|cd|command|compgen|complete|declare|dirs|disown|echo|enable|eval|exec|exit|export|false|fc|fg|getopts|hash|help|history|jobs|kill|let|local|logout|popd|printf|pushd|pwd|read|readonly|set|shift|shopt|source|suspend|test|time|times|trap|true|type|typeset|ulimit|umask|unalias|unset|wait)\\s*\\b(?!\\.)" (Arbitrary "Name" :. Arbitrary "Builtin")
    , tok "#.*\\n" (Arbitrary "Comment")
    , tok "\\\\[\\w\\W]" (Arbitrary "Literal" :. Arbitrary "String" :. Arbitrary "Escape")
    , tok "(\\b\\w+)(\\s*)(=)" (ByGroups [(Arbitrary "Name" :. Arbitrary "Variable"), (Arbitrary "Text"), (Arbitrary "Operator")])
    , tok "[\\[\\]{}()=]" (Arbitrary "Operator")
    , tok "<<-?\\s*(\\'?)\\\\?(\\w+)[\\w\\W]+?\\2" (Arbitrary "Literal" :. Arbitrary "String")
    , tok "&&|\\|\\|" (Arbitrary "Operator")
    ]
-- | State inside @$(...)@ command substitution; @)@ pops back out.
paren' :: TokenMatcher
paren' =
    [ tokNext "\\)" (Arbitrary "Keyword") Pop
    , anyOf root'
    ]
-- | Plain data: double/single-quoted strings, bare words, numbers, and
-- simple @$var@ references.
data' :: TokenMatcher
data' =
    [ tok "(?s)\\$?\"(\\\\\\\\|\\\\[0-7]+|\\\\.|[^\"\\\\])*\"" (Arbitrary "Literal" :. Arbitrary "String" :. Arbitrary "Double")
    , tok "(?s)\\$?'(\\\\\\\\|\\\\[0-7]+|\\\\.|[^'\\\\])*'" (Arbitrary "Literal" :. Arbitrary "String" :. Arbitrary "Single")
    , tok ";" (Arbitrary "Text")
    , tok "\\s+" (Arbitrary "Text")
    , tok "[^=\\s\\n\\[\\]{}()$\"\\'`\\\\<]+" (Arbitrary "Text")
    , tok "\\d+(?= |\\Z)" (Arbitrary "Literal" :. Arbitrary "Number")
    , tok "\\$#?(\\w+|.)" (Arbitrary "Name" :. Arbitrary "Variable")
    , tok "<" (Arbitrary "Text")
    ]
-- | Arithmetic expansion @$((...))@; the closing @))@ pops.
math' :: TokenMatcher
math' =
    [ tokNext "\\)\\)" (Arbitrary "Keyword") Pop
    , tok "[-+*/%^|&]|\\*\\*|\\|\\|" (Arbitrary "Operator")
    , tok "\\d+" (Arbitrary "Literal" :. Arbitrary "Number")
    , anyOf root'
    ]
| chemist/highlighter | src/Text/Highlighter/Lexers/Bash.hs | bsd-3-clause | 3,145 | 0 | 12 | 535 | 819 | 427 | 392 | 61 | 1 |
{-|
Module : Data.BinaryIndexedTree.ST
Description : Binary Indexed Trees (a.k.a. Fenwick Trees)
Copyright : (c) 2012 Maxwell Sayles.
License : BSD3
Maintainer : [email protected]
Stability : stable
Portability : portable
Implements mutable binary indexed trees (a.k.a. Fenwick Trees)
in O(logn) for increment and lookup and O(n) for creation.
Original concept from Peter M. Fenwick (1994)
\"/A new data structure for cumulative frequency tables/\"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.14.8917>.
Index i in the tree represents the sum of all values of
indexes j<=i for some array.
Indexes start at 1.
-}
module Data.BinaryIndexedTree.ST
(BinaryIndexedTree, new, (!), increment)
where
import Control.Applicative
import Control.Monad
import Control.Monad.ST
import Data.Array.MArray
import Data.Array.ST
import Data.Bits
{-| Binary Indexed Tree -}
-- A mutable unboxed Int array over indexes 1..n; slot i holds a partial sum
-- covering the interval determined by i's lowest set bit.
type BinaryIndexedTree s = STUArray s Int Int
{-| Construct a new binary indexed tree on the indexes 1 through n. -}
-- Every slot starts at zero, i.e. all prefix sums are initially 0.
new :: Int -> ST s (BinaryIndexedTree s)
new n = newListArray (1, n) (replicate n 0)
{-| Compute the sum of all indexes 1 through i, inclusive. Takes O(logn). -}
(!) :: BinaryIndexedTree s -> Int -> ST s Int
(!) bit i = f i 0
  where f i acc
          | i < 1 = return acc
          | otherwise =
              -- add this node's partial sum, then clear the lowest set bit
              -- of i to hop to the next covering interval
              do acc' <- (+acc) <$> readArray bit i
                 let i' = i - (i .&. (-i))
                 f i' acc'
{-| Increment the value at index i. Takes O(logn). -}
-- Adds @v@ at index @i@ and at every covering node above it: adding the
-- lowest set bit of i walks up until the upper bound u is passed.
increment :: Int -> Int -> BinaryIndexedTree s -> ST s ()
increment i v bit =
  do (_, u) <- getBounds bit
     let f i = when (i <= u) $ do
           writeArray bit i . (+v) =<< readArray bit i
           let i' = i + (i .&. (-i))
           f i'
     f i
module HipBot.Naggy.Scheduling
( startReminder
, stopReminder
, stopReminders
) where
import Control.Concurrent
import Control.Concurrent.STM
import Control.Lens
import Control.Monad.Reader
import Data.Foldable
import qualified Data.HashMap.Strict as HashMap
import Data.Monoid
import qualified Data.Set as Set
import Data.Time.Calendar
import Data.Time.Calendar.WeekDate
import Data.Time.Clock
import Data.Time.LocalTime
import Data.Time.Zones
import Data.Time.Zones.All
import Prelude hiding (elem)
import System.Log.Logger
import HipBot
import HipBot.Naggy.Types
-- | (Re)start the background thread for a reminder: any existing thread for
-- the same (oauthId, ident) pair is stopped first, then a fresh thread
-- running 'runReminder' is forked and recorded in the 'threads' map.
startReminder :: Reminder -> Naggy ()
startReminder r = do
  tvar <- view threads
  let
    oid = r ^. oauthId
    rid = r ^. ident
  stopReminder oid rid
  tid <- forkNaggy (runReminder r)
  liftIO . atomically . modifyTVar' tvar $
    (at oid . non HashMap.empty . at rid .~ Just tid)
-- | Worker loop for one reminder: sleep until the next occurrence, send the
-- notification, and recurse forever. The loop only ends when the thread is
-- killed (see 'stopReminder').
runReminder :: Reminder -> Naggy ()
runReminder r = do
  liftIO $ do
    delay <- microsToNext r
    now <- getCurrentTime
    -- NOTE(review): 'delay' is in microseconds ('threadDelay' units) but the
    -- log message labels it "ms" — the label looks wrong; confirm.
    debugM "naggy" $ mconcat
      [ "Pausing at "
      , show now
      , " for "
      , show delay
      , "ms for reminder "
      , show r
      ]
    threadDelay delay
  bot <- view hipBot
  -- Send failures are logged and otherwise ignored; the loop continues.
  e <- sendNotification bot (r ^. oauthId) (r ^. roomId . to Right) (r ^. notification)
  liftIO . traverse_ (errorM "naggy" . show) $ e
  runReminder r
-- | Microseconds from now until the reminder next fires, evaluated in the
-- reminder's own time zone.
microsToNext :: Reminder -> IO Int
microsToNext r = case r ^. repeating of
  Weekly _ days -> getCurrentTime <&> \nowUtc ->
    let
      z = tzByLabel $ r ^. tz
      tLocal = utcToLocalTimeTZ z nowUtc
      -- ISO week date gives 1..7 (Mon..Sun); `mod` 7 remaps that onto the
      -- WeekDay enum (presumably Sunday-indexed at 0 — TODO confirm).
      weekDay :: WeekDay
      weekDay = toEnum . (`mod` 7) . view _3 . toWeekDate . localDay $ tLocal
      -- today's date at the reminder's configured time of day
      tLocal' = tLocal { localTimeOfDay = TimeOfDay (r ^. time . hour) (r ^. time . minute) 0 }
    in
      -- Fire later today if today is scheduled and the time has not yet
      -- passed; otherwise advance to the next scheduled weekday, wrapping
      -- to the earliest scheduled day of next week when none remain.
      diff nowUtc z $ if weekDay `elem` days && tLocal < tLocal'
        then tLocal'
        else addingDays tLocal' $ case Set.lookupGT weekDay days of
          Just d -> fromEnum d - fromEnum weekDay
          Nothing ->
            let d = Set.findMin days
            in fromEnum (maxBound :: WeekDay) - fromEnum weekDay + fromEnum d + 1
-- | Microseconds from @a@ until the given local time interpreted in zone
-- @z@ (negative when that local time is already in the past).
diff :: Integral c => UTCTime -> TZ -> LocalTime -> c
diff a z = round . (* 1000000) . flip diffUTCTime a . localTimeToUTCTZ z
-- | Shift a 'LocalTime' forward by @n@ calendar days, leaving the
-- time-of-day untouched.
addingDays :: Integral a => LocalTime -> a -> LocalTime
addingDays localTime n = localTime { localDay = shiftedDay }
  where
    shiftedDay = addDays (fromIntegral n) (localDay localTime)
-- | Kill the thread for a single reminder (if one is registered) and drop it
-- from the 'threads' map. Lookup-and-delete happens in one STM transaction,
-- so two concurrent stops cannot both observe (and kill) the same ThreadId.
stopReminder :: OAuthId -> ReminderId -> Naggy ()
stopReminder oid rid = do
  tvar <- view threads
  liftIO $ do
    tid <- atomically $ do
      xs <- readTVar tvar
      writeTVar tvar $ xs & at oid . non HashMap.empty . at rid .~ Nothing
      return $ xs ^. at oid . non HashMap.empty . at rid
    traverse_ killThread tid
-- | Kill every reminder thread registered under one OAuth id, removing the
-- whole entry from the 'threads' map in a single STM transaction.
stopReminders :: OAuthId -> Naggy ()
stopReminders oid = do
  tvar <- view threads
  liftIO $ do
    tids <- atomically $ do
      xs <- readTVar tvar
      writeTVar tvar $ xs & at oid .~ Nothing
      return $ xs ^. at oid . non HashMap.empty . to HashMap.elems
    traverse_ killThread tids
| purefn/naggy | src/HipBot/Naggy/Scheduling.hs | bsd-3-clause | 2,977 | 0 | 24 | 745 | 1,080 | 537 | 543 | -1 | -1 |
{-|
Module      : MergeSort
Description : Merge sort algorithm
Copyright : (c) Maksymilian Wojczuk, 2017
Implementation of the merge sort algorithm.
-}
module MergeSort
( mergeSort,
merge
) where
-- |Merge two already-sorted lists into one sorted list. When heads are
-- equal the element from the first list is taken first (stable).
merge :: Ord a => [a] -> [a] -> [a]
merge left right = case (left, right) of
  (_, []) -> left
  ([], _) -> right
  (l:ls, r:rs)
    | l <= r    -> l : merge ls right
    | otherwise -> r : merge left rs
-- |Sort a list of Ord elements with the merge sort algorithm: split in
-- half, sort each half recursively, and merge the results.
mergeSort :: Ord a => [a] -> [a]
mergeSort [] = []
mergeSort [x] = [x]
mergeSort xs = fuse (mergeSort front) (mergeSort back)
  where
    -- splitAt does the take/drop split in a single pass
    (front, back) = splitAt (length xs `div` 2) xs
    -- local two-way merge of sorted lists (same contract as top-level 'merge')
    fuse as [] = as
    fuse [] bs = bs
    fuse (a:as) (b:bs)
      | a <= b    = a : fuse as (b:bs)
      | otherwise = b : fuse (a:as) bs
| maxiwoj/HaskellProject | src/MergeSort.hs | bsd-3-clause | 906 | 0 | 10 | 204 | 276 | 148 | 128 | 15 | 1 |
----------------------------------------------------------------------------
-- |
-- Module : Control.Monad.ErrorExcept
-- Copyright : (c) Sergey Vinokurov 2018
-- License : BSD3-style (see LICENSE)
-- Maintainer : [email protected]
----------------------------------------------------------------------------
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveFunctor #-}
{-# LANGUAGE FlexibleContexts #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE MultiParamTypeClasses #-}
{-# LANGUAGE StandaloneDeriving #-}
{-# LANGUAGE TypeFamilies #-}
{-# LANGUAGE UndecidableInstances #-}
module Control.Monad.ErrorExcept
( ErrorExceptT
, runErrorExceptT
) where
import Control.Monad.Base
import Control.Monad.Catch
import Control.Monad.Except
import Control.Monad.Reader
import Control.Monad.State
import Control.Monad.Trans.Control
import Control.Monad.Writer
import Control.Monad.Filesystem (MonadFS)
import Control.Monad.Logging (MonadLog)
-- | An 'ExceptT'-style transformer that adds no state of its own: errors
-- travel as runtime exceptions in the underlying monad @m@ (see the
-- 'MonadError' instance) instead of in an 'Either' layer.
newtype ErrorExceptT e m a = ErrorExceptT { unErrorExceptT :: m a }
  deriving
    ( Functor
    , Applicative
    , Monad
    , MonadReader r
    , MonadState s
    , MonadWriter w
    , MonadBase b
    , MonadCatch
    , MonadMask
    , MonadThrow
    , MonadLog
    )
-- The wrapper is a plain newtype over @m@, so instances lift unchanged.
deriving instance (MonadBaseControl IO m, MonadMask m) => MonadFS (ErrorExceptT e m)
instance MonadTrans (ErrorExceptT e) where
  {-# INLINE lift #-}
  lift = ErrorExceptT
-- The transformer carries no state, so the run-state StT is the identity.
instance MonadTransControl (ErrorExceptT e) where
  type StT (ErrorExceptT e) a = a
  {-# INLINE liftWith #-}
  {-# INLINE restoreT #-}
  liftWith f = ErrorExceptT $ f unErrorExceptT
  restoreT = ErrorExceptT
instance MonadBaseControl b m => MonadBaseControl b (ErrorExceptT e m) where
  type StM (ErrorExceptT e m) a = StM m (StT (ErrorExceptT e) a)
  {-# INLINE liftBaseWith #-}
  {-# INLINE restoreM #-}
  liftBaseWith = defaultLiftBaseWith
  restoreM = defaultRestoreM
-- Errors are real exceptions: 'throwError' throws via 'throwM' and
-- 'catchError' catches via 'catch' in the underlying monad.
instance (Exception e, MonadThrow m, MonadCatch m, MonadBase IO m) => MonadError e (ErrorExceptT e m) where
  {-# INLINE throwError #-}
  throwError = throwM
  catchError action handler = ErrorExceptT $
    catch (unErrorExceptT action) (unErrorExceptT . handler)
-- | Run the computation, capturing any thrown exception of type @e@ in
-- 'Left' and a normal result in 'Right'.
--
-- The previous hand-rolled @handle (pure . Left) $ Right <$> action@ is
-- exactly 'try' from "Control.Monad.Catch"; use the library combinator.
runErrorExceptT :: (Exception e, MonadCatch m) => ErrorExceptT e m a -> m (Either e a)
runErrorExceptT = try . unErrorExceptT
| sergv/tags-server | src/Control/Monad/ErrorExcept.hs | bsd-3-clause | 2,454 | 0 | 10 | 497 | 512 | 287 | 225 | 58 | 1 |
{-# LANGUAGE LambdaCase #-}
module Data.ListParse where
import Control.Applicative
import Control.Monad
-- Quick and dirty monadic parser for list inputs
-- | A parser over token lists: consumes a prefix of @[a]@ and either fails
-- with a message or returns the leftover input together with a result.
data Parser a r = Parser { parse :: [a] -> Either String ([a], r) }
instance Functor (Parser a) where
    -- map over the result component, leaving the leftover input untouched
    fmap f (Parser p) = Parser (fmap (fmap f) . p)
instance Monad (Parser a) where
    return x = Parser $ \s -> Right (s, x)
    -- sequencing: run p, then feed its result to f on the remaining input
    Parser p >>= f = Parser $ \s ->
        case p s of
            Left e -> Left e
            Right (s', r) -> parse (f r) s'
instance Applicative (Parser a) where
    pure = return
    (<*>) = ap
instance Alternative (Parser a) where
    empty = Parser $ const (Left "empty")
    -- try p1; on failure run p2 on the ORIGINAL input (full backtracking)
    Parser p1 <|> Parser p2 = Parser $ \s ->
        case p1 s of
            Left _ -> p2 s
            Right (s', r) -> Right (s', r)
-- | Match exactly the given token, producing no result value.
token :: Eq a => a -> Parser a ()
token a = Parser $ \input -> case input of
    (t:ts)
        | t == a    -> Right (ts, ())
        | otherwise -> Left "token mismatch"
    [] -> Left "No token to match"
-- | Consume and return the next token, failing on empty input.
anyToken :: Parser a a
anyToken = Parser $ \input -> case input of
    []     -> Left "No token"
    (t:ts) -> Right (ts, t)
-- | Succeed only when no tokens remain.
endOfInput :: Parser a ()
endOfInput = Parser $ \input ->
    if null input
        then Right ([], ())
        else Left "Unexpected token at end of input"
| bhamrick/wordpairs | Data/ListParse.hs | bsd-3-clause | 1,215 | 0 | 13 | 365 | 526 | 270 | 256 | 36 | 3 |
import Control.Monad (when)
import Data.List (intercalate)
import Data.List.Split (splitOn)
import Development.Shake.Config
import Need
import Node
import Paths
import Shake.BuildNode
import System.Directory (getCurrentDirectory)
-- | Render the @SetUpData.sh@ shell fragment: the caselist path followed by
-- one @key=value@ line per tract-measure path, with parentheses escaped so
-- the values survive shell evaluation.
--
-- NOTE(review): the original also computed the wmql paths (via 'pathsWmql')
-- into a binding @ps'@ that was never referenced; that dead binding is
-- removed here without changing the output. If wmql paths were meant to be
-- emitted too, that is a deliberate behavioral change to make separately.
makeSetUpData :: FilePath -> String
makeSetUpData projdir =
  let rplc before after s = intercalate after . splitOn before $ s
      -- escape ( and ) for the shell
      escape = rplc ")" "\\)" . rplc "(" "\\("
      ps =
        concat $
        zipWith (pathsTractMeasures projdir)
                [1 ..]
                (tractMeasuresFromCaseid "$case")
  in unlines $
     ["caselist=" ++ projdir ++ "/config/caselist.txt"] ++
     map (\(k,v) -> k ++ "=" ++ escape v) ps
-- | Pipeline entry point. With no caseids on the command line, write the
-- generated @SetUpData.sh@; otherwise demand the output nodes for the given
-- cases and run the build rules.
--
-- NOTE(review): 'ukfNodes' and 'wmqlNodes' are computed but never needed
-- below — presumably leftovers or work in progress; confirm before removing.
main :: IO ()
main =
  shakeArgsWith
    shakeOptions {shakeFiles = Paths.outdir
                 ,shakeVerbosity = Chatty
                 ,shakeReport = map (combine Paths.outdir) ["report.html","report.json"]}
    [] $
  \_ caseids ->
    return $
    Just $
    do usingConfigFile "config/settings.cfg"
       action $
         if null caseids
            then do projdir <- liftIO $ getCurrentDirectory
                    writeFile' (Paths.outdir </> "SetUpData.sh") $
                      makeSetUpData projdir
            else let tractMeasureNodes =
                       concatMap tractMeasuresFromCaseid caseids
                     fsInDwiNodes = concatMap fsInDwiFromCaseid caseids
                     ukfNodes = concatMap ukfFromCaseid caseids
                     wmqlNodes = concatMap wmqlFromCaseid caseids
                     dwiNodes = concatMap dwiFromCaseid caseids
                 in do when (not $ null tractMeasureNodes)
                            (do needs (tractMeasureNodes :: [TractMeasures])
                                return ())
                       when (not $ null fsInDwiNodes)
                            (do needs (fsInDwiNodes :: [FsInDwi])
                                return ())
                       when (not $ null dwiNodes)
                            (do needs (dwiNodes :: [Dwi])
                                return ())
                       rules
| reckbo/ppl | pipeline-cli/Main.hs | bsd-3-clause | 2,331 | 0 | 22 | 971 | 573 | 292 | 281 | 58 | 2 |
module Combinations where
-- | List all combinations of length n of a list (6 kyu)
-- | Link: https://biturl.io/Combinations
-- | Original solution: build the powerset, keep only subsets of length n.
combinations :: Int -> [a] -> [[a]]
combinations n = filter (\combo -> length combo == n) . foldr extend [[]]
  where
    -- every subset so far, then every subset with x consed onto it
    extend x subsets = subsets ++ map (x :) subsets
-- | Refactored solution: direct recursion, avoiding the exponential
-- powerset of 'combinations'.
--
-- Fixes two defects in the original:
--   * it recursed into 'combinations' instead of itself, so the powerset
--     was still built and the "more efficient" claim did not hold;
--   * the base case @_ [] = [[]]@ wrongly produced a (too short) result
--     when @n > 0@ elements were still required but none remained.
combinations' :: Int -> [a] -> [[a]]
combinations' 0 _ = [[]] -- choose nothing: exactly one empty combination
combinations' _ [] = [] -- elements still needed but none left
combinations' n (x:xs) = withX ++ withoutX
  where
    -- combinations containing x: pick the remaining n-1 from the tail
    withX = [x : rest | rest <- combinations' (n - 1) xs]
    -- combinations skipping x; only recurse when enough elements remain
    withoutX
      | n <= length xs = combinations' n xs
      | otherwise = []
-- | A cleaner and more elegant solution found on the Haskell Wiki: pick
-- each element in turn and recurse only into the elements after it.
combinations'' :: Int -> [a] -> [[a]]
combinations'' 0 _ = [[]]
combinations'' n xs =
  concat
    [ map (pick :) (combinations'' (n - 1) (drop (idx + 1) xs))
    | (pick, idx) <- zip xs [0 .. length xs - 1]
    ]
| Eugleo/Code-Wars | src/combinatorics-kata/Combinations.hs | bsd-3-clause | 937 | 0 | 12 | 231 | 355 | 196 | 159 | 19 | 2 |
{-# LANGUAGE OverloadedStrings #-}
module ByteStringToBase64
where
import Data.ByteString.Lazy (ByteString)
import qualified Data.ByteString.Lazy as B
import Data.ByteString.Base64.Lazy (encode)
import qualified Data.ByteString.Lazy.Char8 as BC
import Data.Text (Text)
import qualified Data.Text as T
import Network.Mime (defaultMimeLookup)
-- | Encode a payload as a @data:@ URI: MIME type looked up from the file
-- extension, followed by the base64-encoded bytes.
byteStringToBase64 :: ByteString -> Text -> ByteString
byteStringToBase64 payload extension =
  B.concat [BC.pack "data:", mimeType, BC.pack ";base64,", encode payload]
  where
    -- look up the MIME type for ".<extension>" and widen it to lazy
    mimeType = B.fromStrict (defaultMimeLookup (T.concat [".", extension]))
| stla/jsonxlsx | src/ByteStringToBase64.hs | bsd-3-clause | 662 | 0 | 9 | 155 | 149 | 90 | 59 | 12 | 1 |
{-# LANGUAGE BangPatterns, RecordWildCards, GADTs #-}
module GHC.Cmm.LayoutStack (
cmmLayoutStack, setInfoTableStackMap
) where
import GhcPrelude hiding ((<*>))
import GHC.StgToCmm.Utils ( callerSaveVolatileRegs, newTemp ) -- XXX layering violation
import GHC.StgToCmm.Foreign ( saveThreadState, loadThreadState ) -- XXX layering violation
import BasicTypes
import GHC.Cmm
import GHC.Cmm.Info
import GHC.Cmm.BlockId
import GHC.Cmm.CLabel
import GHC.Cmm.Utils
import GHC.Cmm.Graph
import ForeignCall
import GHC.Cmm.Liveness
import GHC.Cmm.ProcPoint
import GHC.Runtime.Layout
import GHC.Cmm.Dataflow.Block
import GHC.Cmm.Dataflow.Collections
import GHC.Cmm.Dataflow
import GHC.Cmm.Dataflow.Graph
import GHC.Cmm.Dataflow.Label
import UniqSupply
import Maybes
import UniqFM
import Util
import DynFlags
import FastString
import Outputable hiding ( isEmpty )
import qualified Data.Set as Set
import Control.Monad.Fix
import Data.Array as Array
import Data.Bits
import Data.List (nub)
{- Note [Stack Layout]
The job of this pass is to
- replace references to abstract stack Areas with fixed offsets from Sp.
- replace the CmmHighStackMark constant used in the stack check with
the maximum stack usage of the proc.
- save any variables that are live across a call, and reload them as
necessary.
Before stack allocation, local variables remain live across native
calls (CmmCall{ cmm_cont = Just _ }), and after stack allocation local
variables are clobbered by native calls.
We want to do stack allocation so that as far as possible
- stack use is minimized, and
- unnecessary stack saves and loads are avoided.
The algorithm we use is a variant of linear-scan register allocation,
where the stack is our register file.
We proceed in two passes, see Note [Two pass approach] for why they are not easy
to merge into one.
Pass 1:
- First, we do a liveness analysis, which annotates every block with
the variables live on entry to the block.
- We traverse blocks in reverse postorder DFS; that is, we visit at
least one predecessor of a block before the block itself. The
stack layout flowing from the predecessor of the block will
determine the stack layout on entry to the block.
- We maintain a data structure
Map Label StackMap
which describes the contents of the stack and the stack pointer on
entry to each block that is a successor of a block that we have
visited.
- For each block we visit:
- Look up the StackMap for this block.
- If this block is a proc point (or a call continuation, if we aren't
splitting proc points), we need to reload all the live variables from the
stack - but this is done in Pass 2, which calculates more precise liveness
information (see description of Pass 2).
- Walk forwards through the instructions:
- At an assignment x = Sp[loc]
- Record the fact that Sp[loc] contains x, so that we won't
need to save x if it ever needs to be spilled.
- At an assignment x = E
- If x was previously on the stack, it isn't any more
- At the last node, if it is a call or a jump to a proc point
- Lay out the stack frame for the call (see setupStackFrame)
- emit instructions to save all the live variables
- Remember the StackMaps for all the successors
- emit an instruction to adjust Sp
- If the last node is a branch, then the current StackMap is the
StackMap for the successors.
- Manifest Sp: replace references to stack areas in this block
with real Sp offsets. We cannot do this until we have laid out
the stack area for the successors above.
In this phase we also eliminate redundant stores to the stack;
see elimStackStores.
- There is one important gotcha: sometimes we'll encounter a control
transfer to a block that we've already processed (a join point),
and in that case we might need to rearrange the stack to match
what the block is expecting. (exactly the same as in linear-scan
register allocation, except here we have the luxury of an infinite
supply of temporary variables).
- Finally, we update the magic CmmHighStackMark constant with the
stack usage of the function, and eliminate the whole stack check
if there was no stack use. (in fact this is done as part of the
main traversal, by feeding the high-water-mark output back in as
an input. I hate cyclic programming, but it's just too convenient
sometimes.)
There are plenty of tricky details: update frames, proc points, return
addresses, foreign calls, and some ad-hoc optimisations that are
convenient to do here and effective in common cases. Comments in the
code below explain these.
Pass 2:
- Calculate live registers, but taking into account that nothing is live at the
entry to a proc point.
- At each proc point and call continuation insert reloads of live registers from
the stack (they were saved by Pass 1).
Note [Two pass approach]
The main reason for Pass 2 is being able to insert only the reloads that are
needed and the fact that the two passes need different liveness information.
Let's consider an example:
.....
\ /
D <- proc point
/ \
E F
\ /
G <- proc point
|
X
Pass 1 needs liveness assuming that local variables are preserved across calls.
This is important because it needs to save any local registers to the stack
(e.g., if register a is used in block X, it must be saved before any native
call).
However, for Pass 2, where we want to reload registers from stack (in a proc
point), this is overly conservative and would lead us to generate reloads in D
for things used in X, even though we're going to generate reloads in G anyway
(since it's also a proc point).
So Pass 2 calculates liveness knowing that nothing is live at the entry to a
proc point. This means that in D we only need to reload things used in E or F.
This can be quite important, for an extreme example see testcase for #3294.
Merging the two passes is not trivial - Pass 2 is a backward rewrite and Pass 1
is a forward one. Furthermore, Pass 1 is creating code that uses local registers
(saving them before a call), which the liveness analysis for Pass 2 must see to
be correct.
-}
-- All stack locations are expressed as positive byte offsets from the
-- "base", which is defined to be the address above the return address
-- on the stack on entry to this CmmProc.
--
-- Lower addresses have higher StackLocs.
--
type StackLoc = ByteOff
{-
A StackMap describes the stack at any given point. At a continuation
it has a particular layout, like this:
| | <- base
|-------------|
| ret0 | <- base + 8
|-------------|
. upd frame . <- base + sm_ret_off
|-------------|
| |
. vars .
. (live/dead) .
| | <- base + sm_sp - sm_args
|-------------|
| ret1 |
. ret vals . <- base + sm_sp (<--- Sp points here)
|-------------|
Why do we include the final return address (ret0) in our stack map? I
have absolutely no idea, but it seems to be done that way consistently
in the rest of the code generator, so I played along here. --SDM
Note that we will be constructing an info table for the continuation
(ret1), which needs to describe the stack down to, but not including,
the update frame (or ret0, if there is no update frame).
-}
-- | Describes the contents of the stack, and Sp, at one program point;
-- offsets are positive bytes below the frame base (see diagram above).
data StackMap = StackMap
 { sm_sp :: StackLoc
      -- ^ the offset of Sp relative to the base on entry
      -- to this block.
 , sm_args :: ByteOff
      -- ^ the number of bytes of arguments in the area for this block
      -- Defn: the offset of young(L) relative to the base is given by
      -- (sm_sp - sm_args) of the StackMap for block L.
 , sm_ret_off :: ByteOff
      -- ^ Number of words of stack that we do not describe with an info
      -- table, because it contains an update frame.
 , sm_regs :: UniqFM (LocalReg,StackLoc)
      -- ^ regs on the stack
 }
-- Debug pretty-printer: dumps all four StackMap fields, one per line.
instance Outputable StackMap where
  ppr StackMap{..} =
     text "Sp = " <> int sm_sp $$
     text "sm_args = " <> int sm_args $$
     text "sm_ret_off = " <> int sm_ret_off $$
     text "sm_regs = " <> pprUFM sm_regs ppr
-- | Entry point of the pass: run Pass 1 ('layout'), then Pass 2
-- ('insertReloadsAsNeeded'), then lower safe foreign calls, returning the
-- rewritten graph plus the final 'StackMap' for every block.
cmmLayoutStack :: DynFlags -> ProcPointSet -> ByteOff -> CmmGraph
               -> UniqSM (CmmGraph, LabelMap StackMap)
cmmLayoutStack dflags procpoints entry_args
               graph@(CmmGraph { g_entry = entry })
  = do
    -- We need liveness info. Dead assignments are removed later
    -- by the sinking pass.
    let liveness = cmmLocalLiveness dflags graph
        blocks = revPostorder graph
    -- mfix ties the knot: 'layout' consumes its own final stack maps and
    -- high-water mark as inputs (see Note [Stack Layout]).
    (final_stackmaps, _final_high_sp, new_blocks) <-
          mfix $ \ ~(rec_stackmaps, rec_high_sp, _new_blocks) ->
            layout dflags procpoints liveness entry entry_args
                   rec_stackmaps rec_high_sp blocks
    blocks_with_reloads <-
        insertReloadsAsNeeded dflags procpoints final_stackmaps entry new_blocks
    new_blocks' <- mapM (lowerSafeForeignCall dflags) blocks_with_reloads
    return (ofBlockList entry new_blocks', final_stackmaps)
-- -----------------------------------------------------------------------------
-- Pass 1
-- -----------------------------------------------------------------------------
-- | Pass 1: walk the blocks in reverse postorder, assigning stack slots,
-- saving live variables around calls, manifesting Sp offsets, and
-- accumulating per-block 'StackMap's plus the Sp high-water mark.
layout :: DynFlags
       -> LabelSet -- proc points
       -> LabelMap CmmLocalLive -- liveness
       -> BlockId -- entry
       -> ByteOff -- stack args on entry
       -> LabelMap StackMap -- [final] stack maps
       -> ByteOff -- [final] Sp high water mark
       -> [CmmBlock] -- [in] blocks
       -> UniqSM
          ( LabelMap StackMap -- [out] stack maps
          , ByteOff -- [out] Sp high water mark
          , [CmmBlock] -- [out] new blocks
          )
layout dflags procpoints liveness entry entry_args final_stackmaps final_sp_high blocks
  = go blocks init_stackmap entry_args []
  where
    (updfr, cont_info) = collectContInfo blocks
    -- The entry block starts with just the arguments on the stack.
    init_stackmap = mapSingleton entry StackMap{ sm_sp = entry_args
                                               , sm_args = entry_args
                                               , sm_ret_off = updfr
                                               , sm_regs = emptyUFM
                                               }
    go [] acc_stackmaps acc_hwm acc_blocks
      = return (acc_stackmaps, acc_hwm, acc_blocks)
    go (b0 : bs) acc_stackmaps acc_hwm acc_blocks
      = do
       let (entry0@(CmmEntry entry_lbl tscope), middle0, last0) = blockSplit b0
       let stack0@StackMap { sm_sp = sp0 }
               = mapFindWithDefault
                   (pprPanic "no stack map for" (ppr entry_lbl))
                   entry_lbl acc_stackmaps
       -- (a) Update the stack map to include the effects of
       --     assignments in this block
       let stack1 = foldBlockNodesF (procMiddle acc_stackmaps) middle0 stack0
       -- (b) Look at the last node and if we are making a call or
       --     jumping to a proc point, we must save the live
       --     variables, adjust Sp, and construct the StackMaps for
       --     each of the successor blocks.  See handleLastNode for
       --     details.
       (middle1, sp_off, last1, fixup_blocks, out)
           <- handleLastNode dflags procpoints liveness cont_info
                             acc_stackmaps stack1 tscope middle0 last0
       -- (c) Manifest Sp: run over the nodes in the block and replace
       --     CmmStackSlot with CmmLoad from Sp with a concrete offset.
       --
       -- our block:
       --    middle0          -- the original middle nodes
       --    middle1          -- live variable saves from handleLastNode
       --    Sp = Sp + sp_off -- Sp adjustment goes here
       --    last1            -- the last node
       --
       let middle_pre = blockToList $ foldl' blockSnoc middle0 middle1
       let final_blocks =
               manifestSp dflags final_stackmaps stack0 sp0 final_sp_high
                          entry0 middle_pre sp_off last1 fixup_blocks
       let acc_stackmaps' = mapUnion acc_stackmaps out
       -- If this block jumps to the GC, then we do not take its
       -- stack usage into account for the high-water mark.
       -- Otherwise, if the only stack usage is in the stack-check
       -- failure block itself, we will do a redundant stack
       -- check.  The stack has a buffer designed to accommodate
       -- the largest amount of stack needed for calling the GC.
       --
           this_sp_hwm | isGcJump last0 = 0
                       | otherwise = sp0 - sp_off
           hwm' = maximum (acc_hwm : this_sp_hwm : map sm_sp (mapElems out))
       go bs acc_stackmaps' hwm' (final_blocks ++ acc_blocks)
-- -----------------------------------------------------------------------------
-- Not foolproof, but GCFun is the culprit we most want to catch
-- | Heuristic: does this exit node tail-call directly into the garbage
-- collector?  Not foolproof, but GCFun is the culprit we most want to catch.
isGcJump :: CmmNode O C -> Bool
isGcJump node =
  case node of
    CmmCall { cml_target = CmmReg (CmmGlobal gcreg) }
      -> gcreg == GCFun || gcreg == GCEnter1
    _ -> False
-- -----------------------------------------------------------------------------
-- This doesn't seem right somehow. We need to find out whether this
-- proc will push some update frame material at some point, so that we
-- can avoid using that area of the stack for spilling. The
-- updfr_space field of the CmmProc *should* tell us, but it doesn't
-- (I think maybe it gets filled in later when we do proc-point
-- splitting).
--
-- So we'll just take the max of all the cml_ret_offs. This could be
-- unnecessarily pessimistic, but probably not in the code we
-- generate.
-- | Scan every block for calls, collecting (continuation label, bytes of
-- return values) pairs, and take the maximum cml_ret_off over all of them
-- as the update-frame boundary.
collectContInfo :: [CmmBlock] -> (ByteOff, LabelMap ByteOff)
collectContInfo blocks =
    (maximum ret_offs, mapFromList (catMaybes mb_argss))
  where
    (mb_argss, ret_offs) = mapAndUnzip contInfo blocks

    contInfo :: Block CmmNode x C -> (Maybe (Label, ByteOff), ByteOff)
    contInfo blk =
      case lastNode blk of
        CmmCall { cml_cont = Just l, cml_ret_args = ret_bytes, cml_ret_off = off }
          -> (Just (l, ret_bytes), off)
        CmmForeignCall { succ = cont, ret_args = ret_bytes, ret_off = off }
          -> (Just (cont, ret_bytes), off)
        _ -> (Nothing, 0)
-- -----------------------------------------------------------------------------
-- Updating the StackMap from middle nodes
-- Look for loads from stack slots, and update the StackMap. This is
-- purely for optimisation reasons, so that we can avoid saving a
-- variable back to a different stack slot if it is already on the
-- stack.
--
-- This happens a lot: for example when function arguments are passed
-- on the stack and need to be immediately saved across a call, we
-- want to just leave them where they are on the stack.
--
-- | Fold one middle node into the StackMap: remember when a local register
-- is loaded straight from a stack slot (so we can avoid re-saving it), and
-- forget a register's slot when it is overwritten by anything else.
procMiddle :: LabelMap StackMap -> CmmNode e x -> StackMap -> StackMap
procMiddle stackmaps (CmmAssign (CmmLocal r) (CmmLoad (CmmStackSlot area off) _)) sm
  = sm { sm_regs = addToUFM (sm_regs sm) r (r, getStackLoc area off stackmaps) }
procMiddle _ (CmmAssign (CmmLocal r) _) sm
  = sm { sm_regs = delFromUFM (sm_regs sm) r }
procMiddle _ _ sm
  = sm
-- | Translate an (Area, offset) pair into an offset from the base of the
-- Old area, consulting the StackMap of the young area's owner if needed.
getStackLoc :: Area -> ByteOff -> LabelMap StackMap -> StackLoc
getStackLoc Old n _ = n
getStackLoc (Young l) n stackmaps =
  maybe (pprPanic "getStackLoc" (ppr l))
        (\sm -> sm_sp sm - sm_args sm + n)
        (mapLookup l stackmaps)
-- -----------------------------------------------------------------------------
-- Handling stack allocation for a last node
-- We take a single last node and turn it into:
--
-- C1 (some statements)
-- Sp = Sp + N
-- C2 (some more statements)
-- call f() -- the actual last node
--
-- plus possibly some more blocks (we may have to add some fixup code
-- between the last node and the continuation).
--
-- C1: is the code for saving the variables across this last node onto
-- the stack, if the continuation is a call or jumps to a proc point.
--
-- C2: if the last node is a safe foreign call, we have to inject some
-- extra code that goes *after* the Sp adjustment.
-- | Decide how the stack must look at the end of this block: which live
-- variables to save, how much to bump Sp, and what StackMap each successor
-- inherits.  Calls/foreign calls allocate a frame; branches must make the
-- stack agree with any target that already has a StackMap or is a proc point.
handleLastNode
   :: DynFlags -> ProcPointSet -> LabelMap CmmLocalLive -> LabelMap ByteOff
   -> LabelMap StackMap -> StackMap -> CmmTickScope
   -> Block CmmNode O O
   -> CmmNode O C
   -> UniqSM
      ( [CmmNode O O]      -- nodes to go *before* the Sp adjustment
      , ByteOff            -- amount to adjust Sp
      , CmmNode O C        -- new last node
      , [CmmBlock]         -- new blocks
      , LabelMap StackMap  -- stackmaps for the continuations
      )

handleLastNode dflags procpoints liveness cont_info stackmaps
               stack0@StackMap { sm_sp = sp0 } tscp middle last
 = case last of
    --  At each return / tail call,
    --  adjust Sp to point to the last argument pushed, which
    --  is cml_args, after popping any other junk from the stack.
    CmmCall{ cml_cont = Nothing, .. } -> do
      let sp_off = sp0 - cml_args
      return ([], sp_off, last, [], mapEmpty)

    --  At each CmmCall with a continuation:
    CmmCall{ cml_cont = Just cont_lbl, .. } ->
       return $ lastCall cont_lbl cml_args cml_ret_args cml_ret_off

    CmmForeignCall{ succ = cont_lbl, .. } -> do
       return $ lastCall cont_lbl (wORD_SIZE dflags) ret_args ret_off
            -- one word of args: the return address

    CmmBranch {}     -> handleBranches
    CmmCondBranch {} -> handleBranches
    CmmSwitch {}     -> handleBranches

  where
     -- Calls and ForeignCalls are handled the same way:
     lastCall :: BlockId -> ByteOff -> ByteOff -> ByteOff
              -> ( [CmmNode O O]
                 , ByteOff
                 , CmmNode O C
                 , [CmmBlock]
                 , LabelMap StackMap
                 )
     lastCall lbl cml_args cml_ret_args cml_ret_off
      = ( assignments
        , spOffsetForCall sp0 cont_stack cml_args
        , last
        , [] -- no new blocks
        , mapSingleton lbl cont_stack )
      where
         (assignments, cont_stack) = prepareStack lbl cml_ret_args cml_ret_off

     -- Produce the save assignments and the continuation's StackMap.
     prepareStack lbl cml_ret_args cml_ret_off
       | Just cont_stack <- mapLookup lbl stackmaps
             -- If we have already seen this continuation before, then
             -- we just have to make the stack look the same:
       = (fixupStack stack0 cont_stack, cont_stack)
             -- Otherwise, we have to allocate the stack frame
       | otherwise
       = (save_assignments, new_cont_stack)
       where
        (new_cont_stack, save_assignments)
            = setupStackFrame dflags lbl liveness cml_ret_off cml_ret_args stack0

     -- For other last nodes (branches), if any of the targets is a
     -- proc point, we have to set up the stack to match what the proc
     -- point is expecting.
     --
     handleBranches :: UniqSM ( [CmmNode O O]
                              , ByteOff
                              , CmmNode O C
                              , [CmmBlock]
                              , LabelMap StackMap )

     handleBranches
         -- Note [diamond proc point]: allocate the frame for the single
         -- proc-point successor L before the branch, avoiding duplicated
         -- saves on every edge into L.
       | Just l <- futureContinuation middle
       , (nub $ filter (`setMember` procpoints) $ successors last) == [l]
       = do
         let cont_args = mapFindWithDefault 0 l cont_info
             (assigs, cont_stack) = prepareStack l cont_args (sm_ret_off stack0)
             out = mapFromList [ (l', cont_stack)
                               | l' <- successors last ]
         return ( assigs
                , spOffsetForCall sp0 cont_stack (wORD_SIZE dflags)
                , last
                , []
                , out)

       | otherwise = do
         pps <- mapM handleBranch (successors last)
         let lbl_map :: LabelMap Label
             lbl_map = mapFromList [ (l,tmp) | (l,tmp,_,_) <- pps ]
             -- Redirect each successor to its fixup block (if one was made).
             fix_lbl l = mapFindWithDefault l l lbl_map
         return ( []
                , 0
                , mapSuccessors fix_lbl last
                , concat [ blk | (_,_,_,blk) <- pps ]
                , mapFromList [ (l, sm) | (l,_,sm,_) <- pps ] )

     -- For each successor of this block
     handleBranch :: BlockId -> UniqSM (BlockId, BlockId, StackMap, [CmmBlock])
     handleBranch l
        --   (a) if the successor already has a stackmap, we need to
        --       shuffle the current stack to make it look the same.
        --       We have to insert a new block to make this happen.
        | Just stack2 <- mapLookup l stackmaps
        = do
             let assigs = fixupStack stack0 stack2
             (tmp_lbl, block) <- makeFixupBlock dflags sp0 l stack2 tscp assigs
             return (l, tmp_lbl, stack2, block)

        --   (b) if the successor is a proc point, save everything
        --       on the stack.
        | l `setMember` procpoints
        = do
             let cont_args = mapFindWithDefault 0 l cont_info
                 (stack2, assigs) =
                      setupStackFrame dflags l liveness (sm_ret_off stack0)
                                                        cont_args stack0
             (tmp_lbl, block) <- makeFixupBlock dflags sp0 l stack2 tscp assigs
             return (l, tmp_lbl, stack2, block)

        --   (c) otherwise, the current StackMap is the StackMap for
        --       the continuation.  But we must remember to remove any
        --       variables from the StackMap that are *not* live at
        --       the destination, because this StackMap might be used
        --       by fixupStack if this is a join point.
        | otherwise = return (l, l, stack1, [])
        where live = mapFindWithDefault (panic "handleBranch") l liveness
              stack1 = stack0 { sm_regs = filterUFM is_live (sm_regs stack0) }
              is_live (r,_) = r `elemRegSet` live
-- | Build a small shim block that performs the given stack-fixup
-- assignments, adjusts Sp, and then jumps to the real target @l@.
-- Returns the target unchanged (and no block) when no fixup is needed.
makeFixupBlock :: DynFlags -> ByteOff -> Label -> StackMap
               -> CmmTickScope -> [CmmNode O O]
               -> UniqSM (Label, [CmmBlock])
makeFixupBlock dflags sp0 l stack tscope assigs
  | null assigs && sp0 == sm_sp stack = return (l, [])
  | otherwise = do
      tmp_lbl <- newBlockId
      let sp_off = sp0 - sm_sp stack
          body   = maybeAddSpAdj dflags sp0 sp_off (blockFromList assigs)
          fixup  = blockJoin (CmmEntry tmp_lbl tscope) body (CmmBranch l)
      return (tmp_lbl, [fixup])
-- Sp is currently pointing to current_sp,
-- we want it to point to
-- (sm_sp cont_stack - sm_args cont_stack + args)
-- so the difference is
-- sp0 - (sm_sp cont_stack - sm_args cont_stack + args)
-- | How much to adjust Sp so that it points just past the call's arguments
-- in the continuation's frame:
--     current_sp - (sm_sp cont_stack - sm_args cont_stack + args)
spOffsetForCall :: ByteOff -> StackMap -> ByteOff -> ByteOff
spOffsetForCall current_sp cont_stack args = current_sp - target_sp
  where
    target_sp = sm_sp cont_stack - sm_args cont_stack + args
-- | create a sequence of assignments to establish the new StackMap,
-- given the old StackMap.
-- | Create a sequence of assignments to establish the new StackMap,
-- given the old StackMap.  A register already sitting in the right slot
-- needs no store.
fixupStack :: StackMap -> StackMap -> [CmmNode O O]
fixupStack old_stack new_stack = concatMap moveReg (stackSlotRegs new_stack)
  where
    old_regs = sm_regs old_stack

    moveReg (r, n)
      | Just (_, m) <- lookupUFM old_regs r
      , m == n
      = []          -- already in the target slot: nothing to do
      | otherwise
      = [CmmStore (CmmStackSlot Old n) (CmmReg (CmmLocal r))]
-- | Allocate a stack frame at @lbl@ for its live variables (via 'allocate')
-- and describe the stack the continuation will see on entry.
setupStackFrame
             :: DynFlags
             -> BlockId                 -- label of continuation
             -> LabelMap CmmLocalLive   -- liveness
             -> ByteOff                 -- updfr
             -> ByteOff                 -- bytes of return values on stack
             -> StackMap                -- current StackMap
             -> (StackMap, [CmmNode O O])

setupStackFrame dflags lbl liveness updfr_off ret_args stack0
  = (cont_stack, assignments)
  where
      -- get the set of LocalRegs live in the continuation
      live = mapFindWithDefault Set.empty lbl liveness

      -- the stack from the base to updfr_off is off-limits.
      -- our new stack frame contains:
      --   * saved live variables
      --   * the return address [young(C) + 8]
      --   * the args for the call,
      --     which are replaced by the return values at the return
      --     point.

      -- everything up to updfr_off is off-limits
      -- stack1 contains updfr_off, plus everything we need to save
      (stack1, assignments) = allocate dflags updfr_off live stack0

      -- And the Sp at the continuation is:
      --   sm_sp stack1 + ret_args
      cont_stack = stack1{ sm_sp = sm_sp stack1 + ret_args
                         , sm_args = ret_args
                         , sm_ret_off = updfr_off
                         }
-- -----------------------------------------------------------------------------
-- Note [diamond proc point]
--
-- This special case looks for the pattern we get from a typical
-- tagged case expression:
--
-- Sp[young(L1)] = L1
-- if (R1 & 7) != 0 goto L1 else goto L2
-- L2:
-- call [R1] returns to L1
-- L1: live: {y}
-- x = R1
--
-- If we let the generic case handle this, we get
--
-- Sp[-16] = L1
-- if (R1 & 7) != 0 goto L1a else goto L2
-- L2:
-- Sp[-8] = y
-- Sp = Sp - 16
-- call [R1] returns to L1
-- L1a:
-- Sp[-8] = y
-- Sp = Sp - 16
-- goto L1
-- L1:
-- x = R1
--
-- The code for saving the live vars is duplicated in each branch, and
-- furthermore there is an extra jump in the fast path (assuming L1 is
-- a proc point, which it probably is if there is a heap check).
--
-- So to fix this we want to set up the stack frame before the
-- conditional jump. How do we know when to do this, and when it is
-- safe? The basic idea is, when we see the assignment
--
-- Sp[young(L)] = L
--
-- we know that
-- * we are definitely heading for L
-- * there can be no more reads from another stack area, because young(L)
-- overlaps with it.
--
-- We don't necessarily know that everything live at L is live now
-- (some might be assigned between here and the jump to L). So we
-- simplify and only do the optimisation when we see
--
-- (1) a block containing an assignment of a return address L
-- (2) ending in a branch where one (and only) continuation goes to L,
-- and no other continuations go to proc points.
--
-- then we allocate the stack frame for L at the end of the block,
-- before the branch.
--
-- We could generalise (2), but that would make it a bit more
-- complicated to handle, and this currently catches the common case.
-- | If the block stores a return-address literal into a young area
-- (@Sp[young(L)] = L@), report that L: it is the continuation we are
-- definitely heading for.  See Note [diamond proc point].
futureContinuation :: Block CmmNode O O -> Maybe BlockId
futureContinuation middle = foldBlockNodesB spot middle Nothing
  where
    spot :: CmmNode a b -> Maybe BlockId -> Maybe BlockId
    spot (CmmStore (CmmStackSlot (Young l) _) (CmmLit (CmmBlock _))) _ = Just l
    spot _ acc = acc
-- -----------------------------------------------------------------------------
-- Saving live registers
-- | Given a set of live registers and a StackMap, save all the registers
-- on the stack and return the new StackMap and the assignments to do
-- the saving.
--
-- | Fit the not-yet-saved live registers into empty stack slots (oldest
-- first), then push whatever did not fit, returning the updated StackMap
-- and the store instructions that perform the saves.
allocate :: DynFlags -> ByteOff -> LocalRegSet -> StackMap
         -> (StackMap, [CmmNode O O])
allocate dflags ret_off live stackmap@StackMap{ sm_sp = sp0
                                              , sm_regs = regs0 }
 =
   -- we only have to save regs that are not already in a slot
   let to_save = filter (not . (`elemUFM` regs0)) (Set.elems live)
       regs1   = filterUFM (\(r,_) -> elemRegSet r live) regs0
   in

   -- make a map of the stack: one entry per word, Occupied or Empty,
   -- covering everything from the base up to max sp0 ret_off.
   let stack = reverse $ Array.elems $
               accumArray (\_ x -> x) Empty (1, toWords dflags (max sp0 ret_off)) $
                 ret_words ++ live_words
            where ret_words =
                   [ (x, Occupied)
                   | x <- [ 1 .. toWords dflags ret_off] ]
                  live_words =
                   [ (toWords dflags x, Occupied)
                   | (r,off) <- nonDetEltsUFM regs1,
                     -- See Note [Unique Determinism and code generation]
                     let w = localRegBytes dflags r,
                     x <- [ off, off - wORD_SIZE dflags .. off - w + 1] ]
   in

   -- Pass over the stack: find slots to save all the new live variables,
   -- choosing the oldest slots first (hence a foldr).
   let
       save slot ([], stack, n, assigs, regs) -- no more regs to save
          = ([], slot:stack, plusW dflags n 1, assigs, regs)

       save slot (to_save, stack, n, assigs, regs)
          = case slot of
               Occupied -> (to_save, Occupied:stack, plusW dflags n 1, assigs, regs)
               Empty
                 | Just (stack', r, to_save') <-
                       select_save to_save (slot:stack)
                 -> let assig = CmmStore (CmmStackSlot Old n')
                                         (CmmReg (CmmLocal r))
                        n' = plusW dflags n 1
                    in
                        (to_save', stack', n', assig : assigs, (r,(r,n')):regs)

                 | otherwise
                 -> (to_save, slot:stack, plusW dflags n 1, assigs, regs)

       -- we should do better here: right now we'll fit the smallest first,
       -- but it would make more sense to fit the biggest first.
       select_save :: [LocalReg] -> [StackSlot]
                   -> Maybe ([StackSlot], LocalReg, [LocalReg])
       select_save regs stack = go regs []
         where go []     _no_fit = Nothing
               go (r:rs) no_fit
                 | Just rest <- dropEmpty words stack
                 = Just (replicate words Occupied ++ rest, r, rs++no_fit)
                 | otherwise
                 = go rs (r:no_fit)
                 where words = localRegWords dflags r

       -- fill in empty slots as much as possible
       (still_to_save, save_stack, n, save_assigs, save_regs)
          = foldr save (to_save, [], 0, [], []) stack

       -- push any remaining live vars on the stack
       (push_sp, push_assigs, push_regs)
          = foldr push (n, [], []) still_to_save
          where
              push r (n, assigs, regs)
                = (n', assig : assigs, (r,(r,n')) : regs)
                where
                  n' = n + localRegBytes dflags r
                  assig = CmmStore (CmmStackSlot Old n')
                                   (CmmReg (CmmLocal r))

       -- If nothing was pushed, trim any trailing run of empty slots off
       -- the top of the frame.
       trim_sp
          | not (null push_regs) = push_sp
          | otherwise
          = plusW dflags n (- length (takeWhile isEmpty save_stack))

       final_regs = regs1 `addListToUFM` push_regs
                          `addListToUFM` save_regs

   in
   -- XXX should be an assert
   if ( n /= max sp0 ret_off ) then pprPanic "allocate" (ppr n <+> ppr sp0 <+> ppr ret_off) else

   -- sanity: the new Sp must stay word-aligned
   if (trim_sp .&. (wORD_SIZE dflags - 1)) /= 0 then pprPanic "allocate2" (ppr trim_sp <+> ppr final_regs <+> ppr push_sp) else

   ( stackmap { sm_regs = final_regs , sm_sp = trim_sp }
   , push_assigs ++ save_assigs )
-- -----------------------------------------------------------------------------
-- Manifesting Sp
-- | Manifest Sp: turn all the CmmStackSlots into CmmLoads from Sp. The
-- block looks like this:
--
-- middle_pre -- the middle nodes
-- Sp = Sp + sp_off -- Sp adjustment goes here
-- last -- the last node
--
-- And we have some extra blocks too (that don't contain Sp adjustments)
--
-- The adjustment for middle_pre will be different from that for
-- middle_post, because the Sp adjustment intervenes.
--
-- | Rewrite one block so that every abstract CmmStackSlot becomes a concrete
-- offset from Sp, insert the Sp adjustment, and drop provably-redundant
-- stack stores.  Nodes before the adjustment use sp0; the last node (and the
-- fixup blocks, which run after it) use sp0 - sp_off.
manifestSp
   :: DynFlags
   -> LabelMap StackMap  -- StackMaps for other blocks
   -> StackMap           -- StackMap for this block
   -> ByteOff            -- Sp on entry to the block
   -> ByteOff            -- SpHigh
   -> CmmNode C O        -- first node
   -> [CmmNode O O]      -- middle
   -> ByteOff            -- sp_off
   -> CmmNode O C        -- last node
   -> [CmmBlock]         -- new blocks
   -> [CmmBlock]         -- final blocks with Sp manifest

manifestSp dflags stackmaps stack0 sp0 sp_high
           first middle_pre sp_off last fixup_blocks
  = final_block : fixup_blocks'
  where
    area_off = getAreaOff stackmaps

    adj_pre_sp, adj_post_sp :: CmmNode e x -> CmmNode e x
    adj_pre_sp  = mapExpDeep (areaToSp dflags sp0            sp_high area_off)
    adj_post_sp = mapExpDeep (areaToSp dflags (sp0 - sp_off) sp_high area_off)

    final_middle = maybeAddSpAdj dflags sp0 sp_off
                 . blockFromList
                 . map adj_pre_sp
                 . elimStackStores stack0 stackmaps area_off
                 $ middle_pre

    final_last   = optStackCheck (adj_post_sp last)

    final_block  = blockJoin first final_middle final_last

    fixup_blocks' = map (mapBlock3' (id, adj_post_sp, id)) fixup_blocks
-- | Offset of an area's base relative to the base of the Old area:
-- the Old area itself is at 0; a young area sits where its owner's
-- StackMap says its frame begins.
getAreaOff :: LabelMap StackMap -> (Area -> StackLoc)
getAreaOff _ Old = 0
getAreaOff stackmaps (Young l) =
  maybe (pprPanic "getAreaOff" (ppr l))
        (\sm -> sm_sp sm - sm_args sm)
        (mapLookup l stackmaps)
-- | Append the @Sp = Sp + sp_off@ adjustment to a block (when non-zero),
-- and at debug level > 0 also emit CmmUnwind pseudo-instructions recording
-- the Sp level before and after the adjustment.
maybeAddSpAdj
  :: DynFlags -> ByteOff -> ByteOff -> Block CmmNode O O -> Block CmmNode O O
maybeAddSpAdj dflags sp0 sp_off block =
  add_initial_unwind $ add_adj_unwind $ adj block
  where
    adj block
      | sp_off /= 0
      = block `blockSnoc` CmmAssign spReg (cmmOffset dflags spExpr sp_off)
      | otherwise = block
    -- Add unwind pseudo-instruction at the beginning of each block to
    -- document Sp level for debugging
    add_initial_unwind block
      | debugLevel dflags > 0
      = CmmUnwind [(Sp, Just sp_unwind)] `blockCons` block
      | otherwise
      = block
      where sp_unwind = CmmRegOff spReg (sp0 - wORD_SIZE dflags)

    -- Add unwind pseudo-instruction right after the Sp adjustment
    -- if there is one.
    add_adj_unwind block
      | debugLevel dflags > 0
      , sp_off /= 0
      = block `blockSnoc` CmmUnwind [(Sp, Just sp_unwind)]
      | otherwise
      = block
      where sp_unwind = CmmRegOff spReg (sp0 - wORD_SIZE dflags - sp_off)
{- Note [SP old/young offsets]
Sp(L) is the Sp offset on entry to block L relative to the base of the
OLD area.
SpArgs(L) is the size of the young area for L, i.e. the number of
arguments.
- in block L, each reference to [old + N] turns into
[Sp + Sp(L) - N]
- in block L, each reference to [young(L') + N] turns into
[Sp + Sp(L) - Sp(L') + SpArgs(L') - N]
- be careful with the last node of each block: Sp has already been adjusted
to be Sp + Sp(L) - Sp(L')
-}
-- | Expression rewriter used by 'manifestSp': turn abstract stack
-- references into Sp-relative offsets (see Note [SP old/young offsets]),
-- concretise CmmHighStackMark, and eliminate always-false stack checks.
areaToSp :: DynFlags -> ByteOff -> ByteOff -> (Area -> StackLoc) -> CmmExpr -> CmmExpr

areaToSp dflags sp_old _sp_hwm area_off (CmmStackSlot area n)
  = cmmOffset dflags spExpr (sp_old - area_off area - n)
    -- Replace (CmmStackSlot area n) with an offset from Sp

areaToSp dflags _ sp_hwm _ (CmmLit CmmHighStackMark)
  = mkIntExpr dflags sp_hwm
    -- Replace CmmHighStackMark with the number of bytes of stack used,
    -- the sp_hwm.  See Note [Stack usage] in GHC.StgToCmm.Heap

areaToSp dflags _ _ _ (CmmMachOp (MO_U_Lt _) args)
  | falseStackCheck args
  = zeroExpr dflags
areaToSp dflags _ _ _ (CmmMachOp (MO_U_Ge _) args)
  | falseStackCheck args
  = mkIntExpr dflags 1
    -- Replace a stack-overflow test that cannot fail with a no-op
    -- See Note [Always false stack check]

areaToSp _ _ _ _ other = other
-- | Determine whether a stack check cannot fail.
-- | Determine whether a stack check cannot fail: for the comparison
-- ((Sp + x) - y `op` SpLim), since SpLim <= Sp, x >= y makes the
-- underflow test a foregone conclusion.  See Note [Always false stack check].
falseStackCheck :: [CmmExpr] -> Bool
falseStackCheck exprs =
  case exprs of
    [ CmmMachOp (MO_Sub _)
        [ CmmRegOff (CmmGlobal Sp) x_off
        , CmmLit (CmmInt y_lit _)]
      , CmmReg (CmmGlobal SpLim)]
      -> fromIntegral x_off >= y_lit
    _ -> False
-- Note [Always false stack check]
-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-- We can optimise stack checks of the form
--
-- if ((Sp + x) - y < SpLim) then .. else ..
--
-- where x and y are non-negative integer byte offsets. Since we know that
-- SpLim <= Sp (remember the stack grows downwards), this test must
-- yield False if (x >= y), so we can rewrite the comparison to False.
-- A subsequent sinking pass will later drop the dead code.
-- Optimising this away depends on knowing that SpLim <= Sp, so it is
-- really the job of the stack layout algorithm, hence we do it now.
--
-- The control flow optimiser may negate a conditional to increase
-- the likelihood of a fallthrough if the branch is not taken. But
-- not every conditional is inverted as the control flow optimiser
-- places some requirements on the predecessors of both branch targets.
-- So we better look for the inverted comparison too.
-- | Collapse a conditional branch whose condition has been rewritten to a
-- constant (by 'areaToSp') into an unconditional branch.
-- See Note [Always false stack check].
optStackCheck :: CmmNode O C -> CmmNode O C
optStackCheck node =
  case node of
    CmmCondBranch (CmmLit (CmmInt lit _)) true false _
      | lit == 0  -> CmmBranch false   -- condition is statically false
      | otherwise -> CmmBranch true    -- condition is statically true
    _ -> node
-- -----------------------------------------------------------------------------
-- | Eliminate stores of the form
--
-- Sp[area+n] = r
--
-- when we know that r is already in the same slot as Sp[area+n]. We
-- could do this in a later optimisation pass, but that would involve
-- a separate analysis and we already have the information to hand
-- here. It helps clean up some extra stack stores in common cases.
--
-- Note that we may have to modify the StackMap as we walk through the
-- code using procMiddle, since an assignment to a variable in the
-- StackMap will invalidate its mapping there.
--
-- | Drop stores of the form @Sp[area+n] = r@ when r is already known to
-- live in exactly that slot, threading the StackMap through the node list
-- with 'procMiddle' so later decisions see up-to-date register info.
elimStackStores :: StackMap
                -> LabelMap StackMap
                -> (Area -> ByteOff)
                -> [CmmNode O O]
                -> [CmmNode O O]
elimStackStores stackmap stackmaps area_off nodes = walk stackmap nodes
  where
    walk _ [] = []
    walk sm (node : rest)
      | CmmStore (CmmStackSlot area m) (CmmReg (CmmLocal r)) <- node
      , Just (_, off) <- lookupUFM (sm_regs sm) r
      , area_off area + m == off
      = walk sm rest                                   -- redundant store: drop it
      | otherwise
      = node : walk (procMiddle stackmaps node sm) rest
-- -----------------------------------------------------------------------------
-- Update info tables to include stack liveness
-- | Fill in the stack-liveness bitmaps of a proc's StackRep info tables
-- from the final StackMaps; non-proc declarations pass through unchanged.
setInfoTableStackMap :: DynFlags -> LabelMap StackMap -> CmmDecl -> CmmDecl
setInfoTableStackMap dflags stackmaps (CmmProc top_info@TopInfo{..} l v g)
  = CmmProc top_info{ info_tbls = mapMapWithKey fix_info info_tbls } l v g
  where
    -- Only StackRep info tables carry a liveness bitmap to update.
    fix_info lbl info_tbl@CmmInfoTable{ cit_rep = StackRep _ } =
       info_tbl { cit_rep = StackRep (get_liveness lbl) }
    fix_info _ other = other

    get_liveness :: BlockId -> Liveness
    get_liveness lbl
      = case mapLookup lbl stackmaps of
          Nothing -> pprPanic "setInfoTableStackMap" (ppr lbl <+> ppr info_tbls)
          Just sm -> stackMapToLiveness dflags sm

setInfoTableStackMap _ _ d = d
-- | Derive the GC liveness bitmap for a frame: every word defaults to True
-- (dead / non-pointer), and words holding a saved GC pointer are marked
-- False (live pointer).  Covers the words between the update frame and the
-- arguments.
stackMapToLiveness :: DynFlags -> StackMap -> Liveness
stackMapToLiveness dflags StackMap{..} =
   reverse $ Array.elems $
        accumArray (\_ x -> x) True (toWords dflags sm_ret_off + 1,
                                     toWords dflags (sm_sp - sm_args)) live_words
   where
     live_words = [ (toWords dflags off, False)
                  | (r,off) <- nonDetEltsUFM sm_regs
                  , isGcPtrType (localRegType r) ]
                  -- See Note [Unique Determinism and code generation]
-- -----------------------------------------------------------------------------
-- Pass 2
-- -----------------------------------------------------------------------------
-- | Pass 2: run a backwards liveness rewrite over the graph, inserting
-- reload instructions at each proc point for exactly the registers that
-- are live there.
insertReloadsAsNeeded
    :: DynFlags
    -> ProcPointSet
    -> LabelMap StackMap
    -> BlockId
    -> [CmmBlock]
    -> UniqSM [CmmBlock]
insertReloadsAsNeeded dflags procpoints final_stackmaps entry blocks = do
    toBlockList . fst <$>
        rewriteCmmBwd liveLattice rewriteCC (ofBlockList entry blocks) mapEmpty
  where
    rewriteCC :: RewriteFun CmmLocalLive
    rewriteCC (BlockCC e_node middle0 x_node) fact_base0 = do
        let entry_label = entryLabel e_node
            stackmap = case mapLookup entry_label final_stackmaps of
                Just sm -> sm
                Nothing -> panic "insertReloadsAsNeeded: rewriteCC: stackmap"

            -- Merge the liveness from successor blocks and analyse the last
            -- node.
            joined = gen_kill dflags x_node $!
                         joinOutFacts liveLattice x_node fact_base0
            -- What is live at the start of middle0.
            live_at_middle0 = foldNodesBwdOO (gen_kill dflags) middle0 joined

            -- If this is a procpoint we need to add the reloads, but only if
            -- they're actually live. Furthermore, nothing is live at the entry
            -- to a proc point.
            (middle1, live_with_reloads)
                | entry_label `setMember` procpoints
                = let reloads = insertReloads dflags stackmap live_at_middle0
                  in (foldr blockCons middle0 reloads, emptyRegSet)
                | otherwise
                = (middle0, live_at_middle0)

            -- Final liveness for this block (forced strictly).
            !fact_base2 = mapSingleton entry_label live_with_reloads

        return (BlockCC e_node middle1 x_node, fact_base2)
-- | Build one CmmAssign per live register that has a stack slot, loading
-- it back from its Sp-relative location.  The cmmOffset corresponds to
-- manifesting @CmmStackSlot Old reg_off@; see Note [SP old/young offsets].
insertReloads :: DynFlags -> StackMap -> CmmLocalLive -> [CmmNode O O]
insertReloads dflags stackmap live = map mkReload live_slots
  where
    sp_off = sm_sp stackmap

    -- Only registers that are both spilled and live need reloading.
    live_slots = filter (\(reg, _) -> reg `elemRegSet` live)
                        (stackSlotRegs stackmap)

    mkReload (reg, reg_off) =
      CmmAssign (CmmLocal reg)
                (CmmLoad (cmmOffset dflags spExpr (sp_off - reg_off))
                         (localRegType reg))
-- -----------------------------------------------------------------------------
-- Lowering safe foreign calls
{-
Note [Lower safe foreign calls]
We start with
Sp[young(L1)] = L1
,-----------------------
| r1 = foo(x,y,z) returns to L1
'-----------------------
L1:
R1 = r1 -- copyIn, inserted by mkSafeCall
...
the stack layout algorithm will arrange to save and reload everything
live across the call. Our job now is to expand the call so we get
Sp[young(L1)] = L1
,-----------------------
| SAVE_THREAD_STATE()
| token = suspendThread(BaseReg, interruptible)
| r = foo(x,y,z)
| BaseReg = resumeThread(token)
| LOAD_THREAD_STATE()
| R1 = r -- copyOut
| jump Sp[0]
'-----------------------
L1:
r = R1 -- copyIn, inserted by mkSafeCall
...
Note the copyOut, which saves the results in the places that L1 is
expecting them (see Note [safe foreign call convention]). Note also
-- that the safe foreign call is replaced by an unsafe one in the Cmm graph.
-}
-- | Expand a block ending in a safe foreign call into the
-- suspendThread / unsafe call / resumeThread sequence, followed by a
-- copy-out of the results and a jump through the top-of-stack return
-- address.  See Note [Lower safe foreign calls].
lowerSafeForeignCall :: DynFlags -> CmmBlock -> UniqSM CmmBlock
lowerSafeForeignCall dflags block
  | (entry@(CmmEntry _ tscp), middle, CmmForeignCall { .. }) <- blockSplit block
  = do
    -- Both 'id' and 'new_base' are KindNonPtr because they're
    -- RTS-only objects and are not subject to garbage collection
    id <- newTemp (bWord dflags)
    new_base <- newTemp (cmmRegType dflags baseReg)
    let (caller_save, caller_load) = callerSaveVolatileRegs dflags
    save_state_code <- saveThreadState dflags
    load_state_code <- loadThreadState dflags
    let suspend = save_state_code <*>
                  caller_save <*>
                  mkMiddle (callSuspendThread dflags id intrbl)
        midCall = mkUnsafeCall tgt res args
        resume  = mkMiddle (callResumeThread new_base id) <*>
                  -- Assign the result to BaseReg: we
                  -- might now have a different Capability!
                  mkAssign baseReg (CmmReg (CmmLocal new_base)) <*>
                  caller_load <*>
                  load_state_code

        -- Copy the call results out to where the continuation expects
        -- them (see Note [safe foreign call convention]).
        (_, regs, copyout) =
             copyOutOflow dflags NativeReturn Jump (Young succ)
                          (map (CmmReg . CmmLocal) res)
                          ret_off []

        -- NB. after resumeThread returns, the top-of-stack probably contains
        -- the stack frame for succ, but it might not: if the current thread
        -- received an exception during the call, then the stack might be
        -- different.  Hence we continue by jumping to the top stack frame,
        -- not by jumping to succ.
        jump = CmmCall { cml_target    = entryCode dflags $
                                         CmmLoad spExpr (bWord dflags)
                       , cml_cont      = Just succ
                       , cml_args_regs = regs
                       , cml_args      = widthInBytes (wordWidth dflags)
                       , cml_ret_args  = ret_args
                       , cml_ret_off   = ret_off }

    graph' <- lgraphOfAGraph ( suspend <*>
                               midCall <*>
                               resume  <*>
                               copyout <*>
                               mkLast jump, tscp)

    -- The generated AGraph must be a single straight-line block, which we
    -- splice back onto the original entry and middle nodes.
    case toBlockList graph' of
      [one] -> let (_, middle', last) = blockSplit one
               in return (blockJoin entry (middle `blockAppend` middle') last)
      _ -> panic "lowerSafeForeignCall0"

  -- Block doesn't end in a safe foreign call:
  | otherwise = return block
-- | A CmmExpr for an external RTS function entry point with the given name.
foreignLbl :: FastString -> CmmExpr
foreignLbl fname =
  CmmLit $ CmmLabel $
    mkForeignLabel fname Nothing ForeignLabelInExternalPackage IsFunction
-- | Unsafe C call @id = suspendThread(BaseReg, intrbl)@: release the
-- capability before a safe foreign call; the returned token is later
-- passed to 'callResumeThread'.
callSuspendThread :: DynFlags -> LocalReg -> Bool -> CmmNode O O
callSuspendThread dflags id intrbl =
  CmmUnsafeForeignCall
        (ForeignTarget (foreignLbl (fsLit "suspendThread"))
              (ForeignConvention CCallConv [AddrHint, NoHint] [AddrHint] CmmMayReturn))
        [id] [baseExpr, mkIntExpr dflags (fromEnum intrbl)]
-- | Unsafe C call @new_base = resumeThread(id)@: re-acquire a capability
-- after the foreign call, using the token from 'callSuspendThread'.  The
-- result becomes the new BaseReg (it may be a different Capability).
callResumeThread :: LocalReg -> LocalReg -> CmmNode O O
callResumeThread new_base id =
  CmmUnsafeForeignCall
        (ForeignTarget (foreignLbl (fsLit "resumeThread"))
              (ForeignConvention CCallConv [AddrHint] [AddrHint] CmmMayReturn))
        [new_base] [CmmReg (CmmLocal id)]
-- -----------------------------------------------------------------------------
-- | Advance a byte offset by a number of machine words.
plusW :: DynFlags -> ByteOff -> WordOff -> ByteOff
plusW dflags bytes nwords = bytes + wORD_SIZE dflags * nwords
-- | One word of the abstract stack picture built by 'allocate'.
data StackSlot = Occupied | Empty
     -- Occupied: a return address or part of an update frame

-- Debug rendering of the stack picture: "XXX" = occupied, "---" = empty.
instance Outputable StackSlot where
  ppr Occupied = text "XXX"
  ppr Empty    = text "---"
-- | Drop exactly @n@ leading Empty slots; Nothing if any of the first @n@
-- slots is Occupied (or the list runs out).
dropEmpty :: WordOff -> [StackSlot] -> Maybe [StackSlot]
dropEmpty n slots
  | n == 0                 = Just slots
  | Empty : rest <- slots  = dropEmpty (n - 1) rest
  | otherwise              = Nothing
-- | True iff the slot holds nothing.
isEmpty :: StackSlot -> Bool
isEmpty slot = case slot of
  Empty -> True
  _     -> False
-- | Size of a local register on the stack, rounded up to whole words.
localRegBytes :: DynFlags -> LocalReg -> ByteOff
localRegBytes dflags reg = roundUpToWords dflags raw_bytes
  where
    raw_bytes = widthInBytes (typeWidth (localRegType reg))
-- | Size of a local register on the stack, in machine words.
localRegWords :: DynFlags -> LocalReg -> WordOff
localRegWords dflags reg = toWords dflags (localRegBytes dflags reg)
-- | Convert a byte offset to a word offset (truncating division).
toWords :: DynFlags -> ByteOff -> WordOff
toWords dflags nbytes = quot nbytes (wORD_SIZE dflags)
-- | All (register, slot) pairs recorded in a StackMap.
-- See Note [Unique Determinism and code generation]
stackSlotRegs :: StackMap -> [(LocalReg, StackLoc)]
stackSlotRegs = nonDetEltsUFM . sm_regs
| sdiehl/ghc | compiler/GHC/Cmm/LayoutStack.hs | bsd-3-clause | 47,009 | 1 | 25 | 13,614 | 7,793 | 4,165 | 3,628 | 556 | 6 |
module Graphics.ChalkBoard.Font
( Graphics.ChalkBoard.Font.initFont
, Font
, letter
, lineSpacing
, label
) where
--import Data.Boolean
--import Data.Ix
import Data.Array.Unboxed
import Graphics.Rendering.TrueType.STB hiding (Font)
import qualified Graphics.Rendering.TrueType.STB as STB
--import Graphics.ChalkBoard
--import Graphics.ChalkBoard.Utils
import Graphics.ChalkBoard.Types
import Graphics.ChalkBoard.Board
import Graphics.ChalkBoard.Buffer
--import Graphics.ChalkBoard.O
import qualified Data.ByteString as BS
-- | A loaded typeface: the underlying STB font handle, plus a slot
-- reserved for a per-character cache (currently unused, hence unit).
data Font = Font
        STB.Font        -- the font
        ()              -- the cache for each use char
-- | Load a TrueType file and select the font at the given index in its
-- font table.
--
-- Fix: the index is now validated before use.  Previously an
-- out-of-range index crashed with an uninformative
-- "Prelude.!!: index too large" error from the bare list indexing.
initFont :: String -> Int -> IO Font
initFont fontFile ix = do
        tt <- loadTTF fontFile
        en <- enumerateFonts tt
        if ix < 0 || ix >= length en
          then error ("initFont: font index " ++ show ix
                      ++ " out of range for " ++ fontFile)
          else do
            font <- STB.initFont tt (en !! ix)
            return (Font font ())
-- | Recommended baseline-to-baseline distance for this font at scale @sz@,
-- derived from the font's vertical metrics (ascent - descent + line gap).
lineSpacing :: Font -> Float -> IO Float
lineSpacing (Font font _) sz = do
        metrics <- getFontVerticalMetrics font
        let unscaled = ascent metrics - descent metrics + lineGap metrics
        return (sz * fromIntegral unscaled)
-- | Render a whole string onto a board, returning the board and the total
-- horizontal advance of the text.
--
-- Cleanups (behaviour unchanged): the per-character
-- @do { (b,off) <- letter ...; return (b,off) }@ wrapper collapsed to
-- 'mapM'; the @zip (map fst brds) (map id (map snd brds))@ roundtrip —
-- which just rebuilds @brds@ — removed.
label :: Font -> Float -> String -> IO (Board UI, Float)
label font sz str = do
        let brd0 :: Board UI
            brd0 = boardOf 0.0          --flip withMask false <$> boardOf 0.9
        -- One (glyph buffer, horizontal advance) pair per character.
        brds <- mapM (letter font sz) str
        -- Overlay each glyph, shifting the board right by its advance
        -- before the next glyph lands.
        -- Use UI rather than Maybe UI later: it will be more efficient,
        -- because we avoid the big <$> here, over the *whole* board.
        let brd1 :: Board UI
            brd1 = foldr (\ (buff, off) brd ->
                             buff `bufferOnBoard` move (off, 0) brd)
                         brd0 brds
        return (brd1, sum (map snd brds))
-- | Rasterise a single character at scale @sz@: returns the glyph as a
-- buffer (positioned by its side bearing and bitmap-box descent) and the
-- horizontal advance for the following character.
letter :: Font -> Float -> Char -> IO
        ( Buffer UI     -- the rendered glyph
        , Float         -- how far to push rest of word to right
        )
letter (Font font ()) sz ch = do
        glyph_K <- do opt <- findGlyph font ch
                      case opt of
                        Just v -> return v
                        Nothing -> error $ "Cannot find: " ++ show ch
--      bb_K <- getGlyphBoundingBox font glyph_K
        (bm_K,_) <- newGlyphBitmap font glyph_K (sz,sz)
        m_K <- getGlyphHorizontalMetrics font glyph_K
        bma_K <- bitmapArray bm_K

        -- The backing texture must have power-of-two dimensions, so round
        -- each bitmap extent up to the next power of two.
        let pot' n x = if n > x then n else pot' (n * 2) x
        let pot = pot' 1

        let ((x0,y0),(x1,y1)) = bounds bma_K
--      print (x1,y1)
        let x1' = pot x1 - 1
        let y1' = pot y1 - 1

        -- Pack the bitmap column-major, zero-padding beyond the glyph and
        -- flipping the x axis (note the x1 - (x - x0) index).
        let bs' = BS.pack [ if x > x1 || y > y1 then 0 else bma_K ! (x1 - (x - x0),y)
                          | x <- [x0..x1']
                          , y <- [y0..y1']
                          ]
--      let x1' = x1
--      let y1' = y1
--      let (x1'',y1'') = (pot x1' - 1,pot y1' - 1)
--      let (bo_x,bo_y) = bo_K
--      let (BBox (a,b) (c,d)) = bb_K
        xx <- getGlyphBitmapBox font glyph_K (sz,sz)
        -- d: the bitmap box's lower extent, used to drop the glyph below
        -- the baseline by the right amount.
        let (BBox (_,_) (_,d)) = xx
        return
          ( moveBuffer (0 + ceiling (sz * fromIntegral (leftSideBearing m_K)),-d) $ newBufferUI bs' (y1'+1,x1'+1)
          , sz * fromIntegral (advanceWidth m_K)  -- + the remainder from the ceiling operation
          )
--just :: O UI -> O (Maybe UI)
--just o = withMask o (o /=* 0)
| andygill/chalkboard2 | Graphics/ChalkBoard/Font.hs | bsd-3-clause | 3,058 | 64 | 14 | 779 | 989 | 542 | 447 | 62 | 4 |
-----------------------------------------------------------------------------
-- |
-- Module : RefacInstantiate
-- Copyright : (c) Christopher Brown 2007
--
-- Maintainer : [email protected]
-- Stability : provisional
-- Portability : portable
--
-- This module contains a transformation for HaRe.
-- Instantiating Patterns
module RefacInstantiate where
import System.IO.Unsafe
import PrettyPrint
import RefacTypeSyn
import RefacLocUtils
-- import GHC (Session)
import Data.Char
import GHC.Unicode
import AbstractIO
import Maybe
import List
import RefacUtils
import RefacRedunDec
import SlicingUtils
import Directory
import LocalSettings
-- | Entry point of the \"instantiate patterns\" refactoring.
--
-- Expected arguments: file name, begin position, end position, followed by
-- one textual instantiation per pattern of the selected match.  The match
-- under the cursor is duplicated with the chosen patterns replaced by the
-- given values, and the rewritten module is written back to disk.
refacInstantiate args
  = do
      let fileName = args!!0
          begin = read (args!!1)::Int
          end = read (args!!2)::Int
          instantPatt = drop 3 args
      AbstractIO.putStrLn "refacInstantiate"
      (inscps, exps, mod, tokList) <- parseSourceFile fileName
      case findMatch fileName begin end mod of
        Nothing -> do error "You can only instantiate patterns on the LHS of a match!"
        Just (decl, pats) ->
          do
            -- Pair each formal pattern with its command-line instantiation,
            -- then substitute the instantiations into the match body.
            let pairedPats = pairPatWithInstance pats instantPatt
            res <- findAndReplaceAllPat decl pairedPats
            case checkCursor fileName begin end mod of
              Left errMsg -> do error errMsg
              Right fun ->
                do
                  -- Prepend the instantiated match to the original binding
                  -- and run the rewrite over the module.
                  let newFun = addMatch fun res
                  ((_,m), (newToks, newMod)) <- applyRefac (addNewPat fun newFun) (Just (inscps, exps, mod, tokList)) fileName
                  writeRefactoredFiles False [((fileName, m), (newToks, newMod))]
                  AbstractIO.putStrLn "Completed.\n"
-- | Prepend a new match to a function binding.  Any declaration that is
-- not a 'HsFunBind' cannot take an extra match and is rejected.
addMatch :: HsDeclP -> HsMatchP -> HsDeclP
addMatch decl newMatch =
    case decl of
      Dec (HsFunBind x ms) -> Dec (HsFunBind x (newMatch : ms))
      _ -> error "You can only instantiate patterns on the LHS of a match!"
-- | Refactoring action passed to 'applyRefac': replace the old binding
-- with the instantiated one inside the module.  (The original bound the
-- result and immediately 'return'ed it; that bind/return pair is redundant.)
addNewPat fun newFun (_, _, mod) = update fun newFun mod
-- | Pair each formal pattern with the instantiation text supplied on the
-- command line; extra patterns or extra instantiations are ignored.
pairPatWithInstance :: [HsPatP] -> [ String ] -> [ (HsPatP, String) ]
pairPatWithInstance [] _ = []
pairPatWithInstance _ [] = []
-- A literal pattern may only be \"instantiated\" by itself or by a
-- wildcard; if the guard fails we fall through to the general equation.
pairPatWithInstance ((Pat (HsPLit _ x)):_) (s:ss)
  | convert x /= s && s /= "_" = error "Can only instantiate an identifier!"
pairPatWithInstance (p:ps) (s:ss) = (p, s) : pairPatWithInstance ps ss
-- | Render a literal as source text, for comparison against the
-- command-line instantiation strings.
--
-- Bug fix: the original's last equation was misspelled @convertt@, which
-- silently created a *separate* top-level function and left 'convert'
-- non-exhaustive for 'HsLitLit'.
convert ::HsLiteral -> String
convert (HsInt x) = show x
convert (HsChar x) = show x
convert (HsString x) = x
convert (HsFrac x) = show x
convert (HsCharPrim x) = show x
convert (HsStringPrim x) = show x
convert (HsIntPrim x) = show x
convert (HsFloatPrim x) = show x
convert (HsDoublePrim x) = show x
convert (HsLitLit x) = x

-- | Kept for backward compatibility with the original typo'd name; now
-- total (the original matched only 'HsLitLit').
convertt :: HsLiteral -> String
convertt = convert
-- | Apply 'findAndReplacePat' once per (pattern, replacement) pair,
-- threading the progressively rewritten term through the monad.
findAndReplaceAllPat :: (Term t, Monad m) => t -> [ (HsPatP, String) ] -> m t
findAndReplaceAllPat t [] = return t
findAndReplaceAllPat t (pair:rest) =
    findAndReplacePat t pair >>= \t' -> findAndReplaceAllPat t' rest
-- | Replace every occurrence of the pattern's variable with the
-- replacement name, via a full top-down traversal of the term.
findAndReplacePat :: (Term t, Monad m) => t -> (HsPatP, String) -> m t
findAndReplacePat t (p,s)
  = applyTP (full_tdTP (idTP `adhocTP` inRhs)) t
  where
    -- Rewrite only the PNT that names the pattern variable; everything
    -- else is left untouched.
    inRhs (pnt::PNT)
      | (patToPNT p) == pnt
      = do return (nameToPNT s)
    inRhs x = return x
-- | Locate the match whose name sits at the given (row, col) position,
-- returning the match together with its LHS patterns; 'Nothing' when the
-- cursor is not on a match.
findMatch :: Term t => String -> Int -> Int -> t -> Maybe (HsMatchP, [HsPatP])
findMatch fileName row col mod
  = applyTU (once_tdTU (failTU `adhocTU` inMatch)) mod
  where
    --The selected sub-expression is in the rhs of a match
    inMatch (match@(HsMatch loc1 pnt pats (rhs@(HsBody e)) ds)::HsMatchP)
      -- Succeed only when the cursor location coincides with the
      -- match's defining name.
      | useLoc (locToPNT fileName (row, col) mod) == useLoc pnt
      = Just (match, pats)
    inMatch _ = Nothing
-- | Resolve the cursor position to the enclosing top-level declaration;
-- 'Left' carries a user-facing error message when nothing is found.
checkCursor :: String -> Int -> Int -> HsModuleP -> Either String HsDeclP
checkCursor fileName row col mod
  = case locToPName of
      Nothing -> Left ("Invalid cursor position. Please place cursor at the beginning of the definition!")
      Just decl -> Right decl
  where
    -- First try declarations that *define* the PName at the cursor;
    -- fall back to declarations merely *containing* the PNT.
    locToPName
      = case res of
          Nothing -> find (definesPNT (locToPNT fileName (row, col) mod)) (hsDecls mod)
          _ -> res
    res = find (defines (locToPN fileName (row, col) mod)) (concat (map hsDecls (hsModDecls mod)))
    -- Only pattern bindings and function bindings can host the cursor.
    definesPNT pnt d@(Dec (HsPatBind loc p e ds))
      = findPNT pnt d
    definesPNT pnt d@(Dec (HsFunBind loc ms))
      = findPNT pnt d
    definesPNT _ _ = False
| forste/haReFork | refactorer/RefacInstantiate.hs | bsd-3-clause | 4,417 | 0 | 21 | 1,199 | 1,477 | 758 | 719 | -1 | -1 |
module Network.Probecraft where
import Network.Pcap
import Network.Probecraft.Sniff
-- | Capture @cnt@ packets from interface @iface@ using the BPF filter
-- string @bpf@, pretty-printing each packet and finally the capture
-- statistics.  Runs until @cnt@ packets have been processed.
printPackets iface cnt bpf = do
    pcap <- openLive iface 1500 False 100000
    setFilter pcap bpf True 0
    link <- datalink pcap
    print $ show link
    loopBS pcap cnt handler
    (statistics pcap) >>= print
  where -- NOTE: the lambda parameter 'head' shadows Prelude.head here.
        handler = \head dat -> do
            print "packet:"
            print head
            -- $! forces the ethernet parse eagerly
            let pkt = id $! ethernet dat
            print $ ((matchEth ./. matchIpv4) ./. (matchIcmp .|. matchTcp .|. matchUdp)) pkt
            putStrLn $ pp pkt
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE FlexibleInstances #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE TemplateHaskell #-}
{-# LANGUAGE ViewPatterns #-}
-- | Build-specific types.
module Stack.Types.Build
(StackBuildException(..)
,FlagSource(..)
,UnusedFlags(..)
,InstallLocation(..)
,ModTime
,modTime
,Installed(..)
,PackageInstallInfo(..)
,Task(..)
,taskLocation
,LocalPackage(..)
,BaseConfigOpts(..)
,Plan(..)
,TestOpts(..)
,BenchmarkOpts(..)
,FileWatchOpts(..)
,BuildOpts(..)
,BuildSubset(..)
,defaultBuildOpts
,TaskType(..)
,TaskConfigOpts(..)
,ConfigCache(..)
,ConstructPlanException(..)
,configureOpts
,BadDependency(..)
,wantedLocalPackages
,FileCacheInfo (..)
,ConfigureOpts (..)
,PrecompiledCache (..))
where
import Control.DeepSeq
import Control.Exception
import Data.Binary (getWord8, putWord8, gput, gget)
import Data.Binary.VersionTagged
import qualified Data.ByteString as S
import Data.Char (isSpace)
import Data.Data
import Data.Hashable
import Data.List (dropWhileEnd, nub, intercalate)
import qualified Data.Map as Map
import Data.Map.Strict (Map)
import Data.Maybe
import Data.Monoid
import Data.Set (Set)
import qualified Data.Set as Set
import Data.Text (Text)
import qualified Data.Text as T
import Data.Text.Encoding (decodeUtf8With)
import Data.Text.Encoding.Error (lenientDecode)
import Data.Time.Calendar
import Data.Time.Clock
import Distribution.System (Arch)
import Distribution.Text (display)
import GHC.Generics
import Path (Path, Abs, File, Dir, mkRelDir, toFilePath, parseRelDir, (</>))
import Path.Extra (toFilePathNoTrailingSep)
import Prelude
import Stack.Types.FlagName
import Stack.Types.GhcPkgId
import Stack.Types.Compiler
import Stack.Types.Config
import Stack.Types.Package
import Stack.Types.PackageIdentifier
import Stack.Types.PackageName
import Stack.Types.Version
import System.Exit (ExitCode (ExitFailure))
import System.FilePath (pathSeparator)
import System.Process.Log (showProcessArgDebug)
----------------------------------------------
-- Exceptions
-- | Everything that can go wrong while constructing or executing a build
-- plan; rendered for users by the 'Show' instance below.
data StackBuildException
  = Couldn'tFindPkgId PackageName
  | CompilerVersionMismatch
        (Maybe (CompilerVersion, Arch))
        (CompilerVersion, Arch)
        GHCVariant
        VersionCheck
        (Maybe (Path Abs File))
        Text -- recommended resolution
        -- ^ Path to the stack.yaml file
  | Couldn'tParseTargets [Text]
  | UnknownTargets
        (Set PackageName) -- no known version
        (Map PackageName Version) -- not in snapshot, here's the most recent version in the index
        (Path Abs File) -- stack.yaml
  | TestSuiteFailure PackageIdentifier (Map Text (Maybe ExitCode)) (Maybe (Path Abs File)) S.ByteString
  | ConstructPlanExceptions
        [ConstructPlanException]
        (Path Abs File) -- stack.yaml
  | CabalExitedUnsuccessfully
        ExitCode
        PackageIdentifier
        (Path Abs File)  -- cabal Executable
        [String]         -- cabal arguments
        (Maybe (Path Abs File)) -- logfiles location
        S.ByteString     -- log contents
  | ExecutionFailure [SomeException]
  | LocalPackageDoesn'tMatchTarget
        PackageName
        Version -- local version
        Version -- version specified on command line
  | NoSetupHsFound (Path Abs Dir)
  | InvalidFlagSpecification (Set UnusedFlags)
  | TargetParseException [Text]
  | DuplicateLocalPackageNames [(PackageName, [Path Abs Dir])]
  | SolverMissingCabalInstall
  | SolverMissingGHC
  | SolverNoCabalFiles
  deriving Typeable
-- | Where a Cabal flag assignment came from.
data FlagSource = FSCommandLine | FSStackYaml
    deriving (Show, Eq, Ord)

-- | A flag assignment that could not be applied, and why.
data UnusedFlags = UFNoPackage FlagSource PackageName
                   -- ^ flag set for a package that doesn't exist
                 | UFFlagsNotDefined FlagSource Package (Set FlagName)
                   -- ^ flags the package does not define
                 | UFSnapshot PackageName
                   -- ^ flag set on a snapshot (immutable) package
    deriving (Show, Eq, Ord)
-- | User-facing rendering of build failures; each constructor gets a
-- multi-line, actionable message.
instance Show StackBuildException where
    show (Couldn'tFindPkgId name) =
        ("After installing " <> packageNameString name <>
         ", the package id couldn't be found " <> "(via ghc-pkg describe " <>
         packageNameString name <> "). This shouldn't happen, " <>
         "please report as a bug")
    show (CompilerVersionMismatch mactual (expected, earch) ghcVariant check mstack resolution) = concat
        [ case mactual of
            Nothing -> "No compiler found, expected "
            Just (actual, arch) -> concat
                [ "Compiler version mismatched, found "
                , compilerVersionString actual
                , " ("
                , display arch
                , ")"
                , ", but expected "
                ]
        , case check of
            MatchMinor -> "minor version match with "
            MatchExact -> "exact version "
            NewerMinor -> "minor version match or newer with "
        , compilerVersionString expected
        , " ("
        , display earch
        , ghcVariantSuffix ghcVariant
        , ") (based on "
        , case mstack of
            Nothing -> "command line arguments"
            Just stack -> "resolver setting in " ++ toFilePath stack
        , ").\n"
        , T.unpack resolution
        ]
    show (Couldn'tParseTargets targets) = unlines
        $ "The following targets could not be parsed as package names or directories:"
        : map T.unpack targets
    show (UnknownTargets noKnown notInSnapshot stackYaml) =
        unlines $ noKnown' ++ notInSnapshot'
      where
        -- Targets with no known version anywhere.
        noKnown'
            | Set.null noKnown = []
            | otherwise = return $
                "The following target packages were not found: " ++
                intercalate ", " (map packageNameString $ Set.toList noKnown)
        -- Targets known to the index but absent from the snapshot.
        notInSnapshot'
            | Map.null notInSnapshot = []
            | otherwise =
                  "The following packages are not in your snapshot, but exist"
                : "in your package index. Recommended action: add them to your"
                : ("extra-deps in " ++ toFilePath stackYaml)
                : "(Note: these are the most recent versions,"
                : "but there's no guarantee that they'll build together)."
                : ""
                : map
                    (\(name, version) -> "- " ++ packageIdentifierString
                        (PackageIdentifier name version))
                    (Map.toList notInSnapshot)
    show (TestSuiteFailure ident codes mlogFile bs) = unlines $ concat
        [ ["Test suite failure for package " ++ packageIdentifierString ident]
        , flip map (Map.toList codes) $ \(name, mcode) -> concat
            [ "    "
            , T.unpack name
            , ": "
            , case mcode of
                Nothing -> " executable not found"
                Just ec -> " exited with: " ++ show ec
            ]
        , return $ case mlogFile of
            Nothing -> "Logs printed to console"
            -- TODO Should we load up the full error output and print it here?
            Just logFile -> "Full log available at " ++ toFilePath logFile
        , if S.null bs
            then []
            else ["", "", doubleIndent $ T.unpack $ decodeUtf8With lenientDecode bs]
        ]
      where
        indent = dropWhileEnd isSpace . unlines . fmap (\line -> "  " ++ line) . lines
        doubleIndent = indent . indent
    show (ConstructPlanExceptions exceptions stackYaml) =
        "While constructing the BuildPlan the following exceptions were encountered:" ++
        appendExceptions exceptions' ++
        if Map.null extras then "" else (unlines
            $ ("\n\nRecommended action: try adding the following to your extra-deps in "
                ++ toFilePath stackYaml)
            : map (\(name, version) -> concat
                [ "- "
                , packageNameString name
                , "-"
                , versionString version
                ]) (Map.toList extras)
            ++ ["", "You may also want to try the 'stack solver' command"]
            )
      where
        exceptions' = removeDuplicates exceptions
        appendExceptions = foldr (\e -> (++) ("\n\n--" ++ show e)) ""
        removeDuplicates = nub
        -- Collect every "not in build plan but has a known version"
        -- dependency so we can suggest concrete extra-deps entries.
        extras = Map.unions $ map getExtras exceptions'
        getExtras (DependencyCycleDetected _) = Map.empty
        getExtras (UnknownPackage _) = Map.empty
        getExtras (DependencyPlanFailures _ m) =
            Map.unions $ map go $ Map.toList m
          where
            go (name, (_range, Just version, NotInBuildPlan)) =
                Map.singleton name version
            go _ = Map.empty
    -- Suppressing duplicate output
    show (CabalExitedUnsuccessfully exitCode taskProvides' execName fullArgs logFiles bs) =
        let fullCmd = unwords
                    $ dropQuotes (toFilePath execName)
                    : map (T.unpack . showProcessArgDebug) fullArgs
            logLocations = maybe "" (\fp -> "\n    Logs have been written to: " ++ toFilePath fp) logFiles
        in "\n--  While building package " ++ dropQuotes (show taskProvides') ++ " using:\n" ++
           "      " ++ fullCmd ++ "\n" ++
           "    Process exited with code: " ++ show exitCode ++
           (if exitCode == ExitFailure (-9)
                then " (THIS MAY INDICATE OUT OF MEMORY)"
                else "") ++
           logLocations ++
           (if S.null bs
                then ""
                else "\n\n" ++ doubleIndent (T.unpack $ decodeUtf8With lenientDecode bs))
      where
        -- appendLines = foldr (\pName-> (++) ("\n" ++ show pName)) ""
        indent = dropWhileEnd isSpace . unlines . fmap (\line -> "  " ++ line) . lines
        dropQuotes = filter ('\"' /=)
        doubleIndent = indent . indent
    show (ExecutionFailure es) = intercalate "\n\n" $ map show es
    show (LocalPackageDoesn'tMatchTarget name localV requestedV) = concat
        [ "Version for local package "
        , packageNameString name
        , " is "
        , versionString localV
        , ", but you asked for "
        , versionString requestedV
        , " on the command line"
        ]
    show (NoSetupHsFound dir) =
        "No Setup.hs or Setup.lhs file found in " ++ toFilePath dir
    show (InvalidFlagSpecification unused) = unlines
        $ "Invalid flag specification:"
        : map go (Set.toList unused)
      where
        showFlagSrc :: FlagSource -> String
        showFlagSrc FSCommandLine = " (specified on command line)"
        showFlagSrc FSStackYaml = " (specified in stack.yaml)"
        go :: UnusedFlags -> String
        go (UFNoPackage src name) = concat
            [ "- Package '"
            , packageNameString name
            , "' not found"
            , showFlagSrc src
            ]
        go (UFFlagsNotDefined src pkg flags) = concat
            [ "- Package '"
            , name
            , "' does not define the following flags"
            , showFlagSrc src
            , ":\n"
            , intercalate "\n"
                  (map (\flag -> "  " ++ flagNameString flag)
                       (Set.toList flags))
            , "\n- Flags defined by package '" ++ name ++ "':\n"
            , intercalate "\n"
                  (map (\flag -> "  " ++ name ++ ":" ++ flagNameString flag)
                       (Set.toList pkgFlags))
            ]
          where name = packageNameString (packageName pkg)
                pkgFlags = packageDefinedFlags pkg
        go (UFSnapshot name) = concat
            [ "- Attempted to set flag on snapshot package "
            , packageNameString name
            , ", please add to extra-deps"
            ]
    show (TargetParseException [err]) = "Error parsing targets: " ++ T.unpack err
    show (TargetParseException errs) = unlines
        $ "The following errors occurred while parsing the build targets:"
        : map (("- " ++) . T.unpack) errs
    show (DuplicateLocalPackageNames pairs) = concat
        $ "The same package name is used in multiple local packages\n"
        : map go pairs
      where
        go (name, dirs) = unlines
            $ ""
            : (packageNameString name ++ " used in:")
            : map goDir dirs
        goDir dir = "- " ++ toFilePath dir
    show SolverMissingCabalInstall = unlines
        [ "Solver requires that cabal be on your PATH"
        , "Try running 'stack install cabal-install'"
        ]
    show SolverMissingGHC = unlines
        [ "Solver requires that GHC be on your PATH"
        , "Try running 'stack setup'"
        ]
    show SolverNoCabalFiles = unlines
        [ "No cabal files provided. Maybe this is due to not having a stack.yaml file?"
        , "Try running 'stack init' to create a stack.yaml"
        ]
instance Exception StackBuildException

-- | Failures encountered while constructing the build plan (as opposed
-- to executing it).
data ConstructPlanException
    = DependencyCycleDetected [PackageName]
    | DependencyPlanFailures PackageIdentifier (Map PackageName (VersionRange, LatestVersion, BadDependency))
    | UnknownPackage PackageName -- TODO perhaps this constructor will be removed, and BadDependency will handle it all
    -- ^ Recommend adding to extra-deps, give a helpful version number?
    deriving (Typeable, Eq)

-- | For display purposes only, Nothing if package not found
type LatestVersion = Maybe Version

-- | Reason why a dependency was not used
data BadDependency
    = NotInBuildPlan
    | Couldn'tResolveItsDependencies
    | DependencyMismatch Version
    deriving (Typeable, Eq)
-- | User-facing rendering of plan-construction failures.
instance Show ConstructPlanException where
  show e =
    let details = case e of
         (DependencyCycleDetected pNames) ->
           "While checking call stack,\n" ++
           "  dependency cycle detected in packages:" ++ indent (appendLines pNames)
         (DependencyPlanFailures pIdent (Map.toList -> pDeps)) ->
           "Failure when adding dependencies:" ++ doubleIndent (appendDeps pDeps) ++ "\n" ++
           "  needed for package: " ++ packageIdentifierString pIdent
         (UnknownPackage pName) ->
             "While attempting to add dependency,\n" ++
             "  Could not find package " ++ show pName ++ " in known packages"
    in indent details
    where
      appendLines = foldr (\pName-> (++) ("\n" ++ show pName)) ""
      indent = dropWhileEnd isSpace . unlines . fmap (\line -> "  " ++ line) . lines
      doubleIndent = indent . indent
      appendDeps = foldr (\dep-> (++) ("\n" ++ showDep dep)) ""
      -- One line per bad dependency: name, wanted range, and why it failed
      -- (mentioning the latest available version when we know it).
      showDep (name, (range, mlatest, badDep)) = concat
        [ show name
        , ": needed ("
        , display range
        , ")"
        , ", "
        , let latestStr =
                case mlatest of
                    Nothing -> ""
                    Just latest -> " (latest is " ++ versionString latest ++ ")"
          in case badDep of
                NotInBuildPlan -> "not present in build plan" ++ latestStr
                Couldn'tResolveItsDependencies -> "couldn't resolve its dependencies"
                DependencyMismatch version ->
                    case mlatest of
                        Just latest
                            | latest == version ->
                                versionString version ++
                                " found (latest version available)"
                        _ -> versionString version ++ " found" ++ latestStr
        ]
{- TODO Perhaps change the showDep function to look more like this:
dropQuotes = filter ((/=) '\"')
(VersionOutsideRange pName pIdentifier versionRange) ->
"Exception: Stack.Build.VersionOutsideRange\n" ++
" While adding dependency for package " ++ show pName ++ ",\n" ++
" " ++ dropQuotes (show pIdentifier) ++ " was found to be outside its allowed version range.\n" ++
" Allowed version range is " ++ display versionRange ++ ",\n" ++
" should you correct the version range for " ++ dropQuotes (show pIdentifier) ++ ", found in [extra-deps] in the project's stack.yaml?"
-}
----------------------------------------------
-- | Which subset of packages to build
data BuildSubset
    = BSAll
    -- ^ Build everything in the plan.
    | BSOnlySnapshot
    -- ^ Only install packages in the snapshot database, skipping
    -- packages intended for the local database.
    | BSOnlyDependencies
    -- ^ Build only the dependencies of the targets, not the targets
    -- themselves.
    deriving (Show, Eq)
-- | Configuration for building.
data BuildOpts =
  BuildOpts {boptsTargets :: ![Text]
             -- ^ Build targets as given on the command line
            ,boptsLibProfile :: !Bool
             -- ^ Enable library profiling?
            ,boptsExeProfile :: !Bool
             -- ^ Enable executable profiling?
            ,boptsHaddock :: !Bool
             -- ^ Build haddocks?
            ,boptsHaddockDeps :: !(Maybe Bool)
             -- ^ Build haddocks for dependencies?
            ,boptsDryrun :: !Bool
             -- ^ Only report what would be done
            ,boptsGhcOptions :: ![Text]
             -- ^ Additional options passed through to GHC
            ,boptsFlags :: !(Map (Maybe PackageName) (Map FlagName Bool))
             -- ^ Cabal flag assignments; 'Nothing' key applies to all packages
            ,boptsInstallExes :: !Bool
             -- ^ Install executables to user path after building?
            ,boptsPreFetch :: !Bool
             -- ^ Fetch all packages immediately
            ,boptsBuildSubset :: !BuildSubset
             -- ^ Which subset of the plan to actually build
            ,boptsFileWatch :: !FileWatchOpts
             -- ^ Watch files for changes and automatically rebuild
            ,boptsKeepGoing :: !(Maybe Bool)
             -- ^ Keep building/running after failure
            ,boptsForceDirty :: !Bool
             -- ^ Force treating all local packages as having dirty files
            ,boptsTests :: !Bool
             -- ^ Turn on tests for local targets
            ,boptsTestOpts :: !TestOpts
             -- ^ Additional test arguments
            ,boptsBenchmarks :: !Bool
             -- ^ Turn on benchmarks for local targets
            ,boptsBenchmarkOpts :: !BenchmarkOpts
             -- ^ Additional benchmark arguments
            ,boptsExec :: ![(String, [String])]
             -- ^ Commands (with arguments) to run after a successful build
            ,boptsOnlyConfigure :: !Bool
             -- ^ Only perform the configure step when building
            ,boptsReconfigure :: !Bool
             -- ^ Perform the configure step even if already configured
            ,boptsCabalVerbose :: !Bool
             -- ^ Ask Cabal to be verbose in its builds
            }
  deriving (Show)
-- | Conservative defaults: build everything requested, no profiling, no
-- haddocks, no tests or benchmarks, no file watching.
defaultBuildOpts :: BuildOpts
defaultBuildOpts = BuildOpts
    { boptsTargets = []
    , boptsLibProfile = False
    , boptsExeProfile = False
    , boptsHaddock = False
    , boptsHaddockDeps = Nothing
    , boptsDryrun = False
    , boptsGhcOptions = []
    , boptsFlags = Map.empty
    , boptsInstallExes = False
    , boptsPreFetch = False
    , boptsBuildSubset = BSAll
    , boptsFileWatch = NoFileWatch
    , boptsKeepGoing = Nothing
    , boptsForceDirty = False
    , boptsTests = False
    , boptsTestOpts = defaultTestOpts
    , boptsBenchmarks = False
    , boptsBenchmarkOpts = defaultBenchmarkOpts
    , boptsExec = []
    , boptsOnlyConfigure = False
    , boptsReconfigure = False
    , boptsCabalVerbose = False
    }
-- | Options for the 'FinalAction' 'DoTests'
data TestOpts =
  TestOpts {toRerunTests :: !Bool -- ^ Whether successful tests will be run gain
           ,toAdditionalArgs :: ![String] -- ^ Arguments passed to the test program
           ,toCoverage :: !Bool -- ^ Generate a code coverage report
           ,toDisableRun :: !Bool -- ^ Disable running of tests
           } deriving (Eq,Show)

-- | Defaults: rerun successful tests, no extra arguments, no coverage,
-- and actually run the tests.
defaultTestOpts :: TestOpts
defaultTestOpts = TestOpts
    { toRerunTests = True
    , toAdditionalArgs = []
    , toCoverage = False
    , toDisableRun = False
    }
-- | Options for the 'FinalAction' 'DoBenchmarks'
data BenchmarkOpts =
  BenchmarkOpts {beoAdditionalArgs :: !(Maybe String) -- ^ Arguments passed to the benchmark program
                ,beoDisableRun :: !Bool -- ^ Disable running of benchmarks
                } deriving (Eq,Show)

-- | Defaults: no extra arguments, and actually run the benchmarks.
defaultBenchmarkOpts :: BenchmarkOpts
defaultBenchmarkOpts = BenchmarkOpts
    { beoAdditionalArgs = Nothing
    , beoDisableRun = False
    }

-- | Whether, and how, to watch the filesystem and rebuild on change.
data FileWatchOpts
    = NoFileWatch
    | FileWatch
    | FileWatchPoll -- ^ polling instead of OS file-change notifications
    deriving (Show,Eq)
-- | Package dependency oracle.
newtype PkgDepsOracle =
    PkgDeps PackageName
    deriving (Show,Typeable,Eq,Hashable,Binary,NFData)
-- | Stored on disk to know whether the flags have changed or any
-- files have changed.
data ConfigCache = ConfigCache
    { configCacheOpts :: !ConfigureOpts
      -- ^ All options used for this package.
    , configCacheDeps :: !(Set GhcPkgId)
      -- ^ The GhcPkgIds of all of the dependencies. Since Cabal doesn't take
      -- the complete GhcPkgId (only a PackageIdentifier) in the configure
      -- options, just using the previous value is insufficient to know if
      -- dependencies have changed.
    , configCacheComponents :: !(Set S.ByteString)
      -- ^ The components to be built. It's a bit of a hack to include this in
      -- here, as it's not a configure option (just a build option), but this
      -- is a convenient way to force compilation when the components change.
    , configCacheHaddock :: !Bool
      -- ^ Are haddocks to be built?
    }
    deriving (Generic,Eq,Show)
instance Binary ConfigCache where
    put x = do
        -- magic string: a fixed byte prefix so stale/foreign cache files
        -- fail to decode instead of being misinterpreted
        putWord8 1
        putWord8 3
        putWord8 4
        putWord8 8
        gput $ from x
    get = do
        -- decoding fails unless the magic bytes written by 'put' match
        1 <- getWord8
        3 <- getWord8
        4 <- getWord8
        8 <- getWord8
        fmap to gget
instance NFData ConfigCache
instance HasStructuralInfo ConfigCache
instance HasSemanticVersion ConfigCache
-- | A task to perform when building
data Task = Task
    { taskProvides :: !PackageIdentifier
      -- ^ the package/version to be built
    , taskType :: !TaskType
      -- ^ the task type, telling us how to build this
    , taskConfigOpts :: !TaskConfigOpts
      -- ^ configure options, parameterized over still-missing dependencies
    , taskPresent :: !(Map PackageIdentifier GhcPkgId)
      -- ^ GhcPkgIds of already-installed dependencies
    , taskAllInOne :: !Bool
      -- ^ indicates that the package can be built in one step
    }
    deriving Show
-- | Given the IDs of any missing packages, produce the configure options
data TaskConfigOpts = TaskConfigOpts
    { tcoMissing :: !(Set PackageIdentifier)
      -- ^ Dependencies for which we don't yet have an GhcPkgId
    , tcoOpts :: !(Map PackageIdentifier GhcPkgId -> ConfigureOpts)
      -- ^ Produce the list of options given the missing @GhcPkgId@s
    }
-- | Debug rendering: lists the still-missing dependencies, then the
-- options as they would be with no further dependencies resolved.
instance Show TaskConfigOpts where
    show (TaskConfigOpts missing f) =
        "Missing: " ++ show missing ++ ". Without those: " ++ show (f Map.empty)
-- | The type of a task, either building local code or something from the
-- package index (upstream)
data TaskType = TTLocal LocalPackage
              | TTUpstream Package InstallLocation
    deriving Show
-- | Which database a task's results get installed into: local packages
-- always go to the local DB, upstream packages carry their own location.
taskLocation :: Task -> InstallLocation
taskLocation = locOf . taskType
  where
    locOf (TTLocal _)        = Local
    locOf (TTUpstream _ loc) = loc
-- | A complete plan of what needs to be built and how to do it
data Plan = Plan
    { planTasks :: !(Map PackageName Task)
      -- ^ Build tasks, keyed by package
    , planFinals :: !(Map PackageName Task)
      -- ^ Final actions to be taken (test, benchmark, etc)
    , planUnregisterLocal :: !(Map GhcPkgId (PackageIdentifier, Maybe Text))
      -- ^ Text is reason we're unregistering, for display only
    , planInstallExes :: !(Map Text InstallLocation)
      -- ^ Executables that should be installed after successful building
    }
    deriving Show
-- | Basic information used to calculate what the configure options are
data BaseConfigOpts = BaseConfigOpts
    { bcoSnapDB :: !(Path Abs Dir)            -- ^ snapshot package database
    , bcoLocalDB :: !(Path Abs Dir)           -- ^ local package database
    , bcoSnapInstallRoot :: !(Path Abs Dir)   -- ^ install root for snapshot packages
    , bcoLocalInstallRoot :: !(Path Abs Dir)  -- ^ install root for local packages
    , bcoBuildOpts :: !BuildOpts              -- ^ user-requested build options
    , bcoExtraDBs :: ![(Path Abs Dir)]        -- ^ additional package databases
    }
-- | Render a @BaseConfigOpts@ to an actual list of options
configureOpts :: EnvConfig
              -> BaseConfigOpts
              -> Map PackageIdentifier GhcPkgId -- ^ dependencies
              -> Bool -- ^ wanted?
              -> Bool -- ^ local non-extra-dep?
              -> InstallLocation
              -> Package
              -> ConfigureOpts
configureOpts econfig bco deps wanted isLocal loc package = ConfigureOpts
    -- Path-related flags are kept apart from the rest so the precompiled
    -- cache can ignore them (see 'ConfigureOpts').
    { coDirs = configureOptsDirs bco loc package
    , coNoDirs = configureOptsNoDir econfig bco deps wanted isLocal package
    }
-- | Directory-related configure flags: package databases plus the
-- standard install-dir layout under the snapshot/local install root.
configureOptsDirs :: BaseConfigOpts
                  -> InstallLocation
                  -> Package
                  -> [String]
configureOptsDirs bco loc package = concat
    [ ["--user", "--package-db=clear", "--package-db=global"]
    , map (("--package-db=" ++) . toFilePath) $ case loc of
        Snap -> bcoExtraDBs bco ++ [bcoSnapDB bco]
        Local -> bcoExtraDBs bco ++ [bcoSnapDB bco] ++ [bcoLocalDB bco]
    , [ "--libdir=" ++ toFilePathNoTrailingSep (installRoot </> $(mkRelDir "lib"))
      , "--bindir=" ++ toFilePathNoTrailingSep (installRoot </> bindirSuffix)
      , "--datadir=" ++ toFilePathNoTrailingSep (installRoot </> $(mkRelDir "share"))
      , "--libexecdir=" ++ toFilePathNoTrailingSep (installRoot </> $(mkRelDir "libexec"))
      , "--sysconfdir=" ++ toFilePathNoTrailingSep (installRoot </> $(mkRelDir "etc"))
      , "--docdir=" ++ toFilePathNoTrailingSep docDir
      , "--htmldir=" ++ toFilePathNoTrailingSep docDir
      , "--haddockdir=" ++ toFilePathNoTrailingSep docDir]
    ]
  where
    installRoot =
        case loc of
            Snap -> bcoSnapInstallRoot bco
            Local -> bcoLocalInstallRoot bco
    -- Docs live in a per-package-version subdirectory when the version
    -- string parses as a relative dir.
    docDir =
        case pkgVerDir of
            Nothing -> installRoot </> docDirSuffix
            Just dir -> installRoot </> docDirSuffix </> dir
    pkgVerDir =
        parseRelDir (packageIdentifierString (PackageIdentifier (packageName package)
                                                                (packageVersion package)) ++
                     [pathSeparator])
-- | Same as 'configureOpts', but does not include directory path options
configureOptsNoDir :: EnvConfig
                   -> BaseConfigOpts
                   -> Map PackageIdentifier GhcPkgId -- ^ dependencies
                   -> Bool -- ^ wanted?
                   -> Bool -- ^ is this a local, non-extra-dep?
                   -> Package
                   -> [String]
configureOptsNoDir econfig bco deps wanted isLocal package = concat
    [ depOptions
    , ["--enable-library-profiling" | boptsLibProfile bopts || boptsExeProfile bopts]
    , ["--enable-executable-profiling" | boptsExeProfile bopts]
      -- Cabal flag assignments: "-fname" to enable, "-f-name" to disable.
    , map (\(name,enabled) ->
                       "-f" <>
                       (if enabled
                           then ""
                           else "-") <>
                       flagNameString name)
                    (Map.toList (packageFlags package))
    , concatMap (\x -> ["--ghc-options", T.unpack x]) allGhcOptions
    , map (("--extra-include-dirs=" ++) . T.unpack) (Set.toList (configExtraIncludeDirs config))
    , map (("--extra-lib-dirs=" ++) . T.unpack) (Set.toList (configExtraLibDirs config))
    , if whichCompiler (envConfigCompilerVersion econfig) == Ghcjs
        then ["--ghcjs"]
        else []
    ]
  where
    config = getConfig econfig
    bopts = bcoBuildOpts bco
    -- Pin every dependency: Cabal >= 1.22 understands precise
    -- "--dependency=name=ghc-pkg-id"; older versions only get a
    -- "--constraint=name==version".
    depOptions = map (uncurry toDepOption) $ Map.toList deps
      where
        toDepOption =
            if envConfigCabalVersion econfig >= $(mkVersion "1.22")
                then toDepOption1_22
                else toDepOption1_18
        toDepOption1_22 ident gid = concat
            [ "--dependency="
            , packageNameString $ packageIdentifierName ident
            , "="
            , ghcPkgIdString gid
            ]
        toDepOption1_18 ident _gid = concat
            [ "--constraint="
            , packageNameString name
            , "=="
            , versionString version
            ]
          where
            PackageIdentifier name version = ident
    ghcOptionsMap = configGhcOptions $ getConfig econfig
    -- GHC options: global (Nothing key), per-package, then the
    -- command-line extras when the apply-ghc-options policy allows it.
    allGhcOptions = concat
        [ Map.findWithDefault [] Nothing ghcOptionsMap
        , Map.findWithDefault [] (Just $ packageName package) ghcOptionsMap
        , if includeExtraOptions
            then boptsGhcOptions bopts
            else []
        ]
    includeExtraOptions =
        case configApplyGhcOptions config of
            AGOTargets -> wanted
            AGOLocals -> isLocal
            AGOEverything -> True
-- | Get set of wanted package names from locals.
wantedLocalPackages :: [LocalPackage] -> Set PackageName
wantedLocalPackages lps =
    Set.fromList [ packageName (lpPackage lp) | lp <- lps, lpWanted lp ]
-- | One-way conversion to serialized time: (modified Julian day,
-- seconds into the day as a Rational).
modTime :: UTCTime -> ModTime
modTime t = ModTime (julianDay, secondsIntoDay)
  where
    julianDay      = toModifiedJulianDay (utctDay t)
    secondsIntoDay = toRational (utctDayTime t)
-- | Configure options to be sent to Setup.hs configure
data ConfigureOpts = ConfigureOpts
    { coDirs :: ![String]
      -- ^ Options related to various paths. We separate these out since they do
      -- not have an impact on the contents of the compiled binary for checking
      -- if we can use an existing precompiled cache.
    , coNoDirs :: ![String]
      -- ^ All remaining (non-path) configure options.
    }
    deriving (Show, Eq, Generic)
instance Binary ConfigureOpts
instance HasStructuralInfo ConfigureOpts
instance NFData ConfigureOpts
-- | Information on a compiled package: the library conf file (if relevant),
-- and all of the executable paths.
data PrecompiledCache = PrecompiledCache
    -- Use FilePath instead of Path Abs File for Binary instances
    { pcLibrary :: !(Maybe FilePath)
      -- ^ .conf file inside the package database
    , pcExes :: ![FilePath]
      -- ^ Full paths to executables
    }
    deriving (Show, Eq, Generic)
instance Binary PrecompiledCache
instance HasSemanticVersion PrecompiledCache
instance HasStructuralInfo PrecompiledCache
instance NFData PrecompiledCache
| vigoo/stack | src/Stack/Types/Build.hs | bsd-3-clause | 30,493 | 0 | 21 | 9,666 | 5,724 | 3,130 | 2,594 | 708 | 7 |
{-# LANGUAGE NoMonomorphismRestriction #-}
module RE.Parse where
import Control.Applicative
import Text.Trifecta
import Text.Trifecta.Delta
import Text.Trifecta.Parser
import Text.Trifecta.Result
import RE.AST
-- | Parse a regular expression into its AST.
--
-- The original used an irrefutable @let Success ast = ...@ pattern, which
-- on malformed input died with an opaque pattern-match failure; we now
-- report which input failed to parse.
parse s =
    case parseString expr (Lines 0 0 0 0) s of
        Success ast -> ast
        _ -> error ("RE.Parse.parse: malformed regular expression: " ++ s)
-- | Top level of the grammar: terms separated by '|' build
-- left-associated 'Alternative' nodes.
expr = term `chainl1` alternativeOp
  where alternativeOp = Binary Alternative <$ char '|'
-- | Sequencing binds tighter than alternation; adjacency itself is the
-- (always-succeeding) operator.
term = factor `chainl1` sequenceOp
  where sequenceOp = pure (Binary Sequence)
-- | A factor is a literal optionally followed by a postfix operator; the
-- 'try' lets us re-parse the bare literal when no operator follows.
factor = try (Unary <$> basic_char <*> unary_op)
     <|> basic_char

-- | Any character that is not one of the metacharacters @|*+?@.
basic_char = Literal <$> noneOf "|*+?"

-- | Postfix repetition operators.
unary_op =     (Many     <$ char '*')
           <|> (Many1    <$ char '+')
           <|> (Optional <$ char '?')
| forestbelton/revm | src/RE/Parse.hs | bsd-3-clause | 662 | 0 | 11 | 130 | 229 | 121 | 108 | 20 | 1 |
--
--
--
-----------------
-- Exercise 11.5.
-----------------
--
--
--
module E'11''5 where
-- zipWith applies the application operator to each element in the first list "[ sum , product ]".
-- The arguments for each application are in the second list "[ [1, 2] , [3, 4] ]".
--
-- Each application result is an element in the resulting list of "zipWith".
{- GHCi>
zipWith ($) [ sum , product ] [ [1, 2] , [3, 4] ]
-}
-- [ 3 , 12 ]
| pascal-knodel/haskell-craft | _/links/E'11''5.hs | mit | 444 | 0 | 2 | 99 | 19 | 18 | 1 | 1 | 0 |
{-# LANGUAGE Arrows #-}
{-# LANGUAGE FlexibleContexts #-}
module Environment
(
cellUnoccupied
, cellOccupied
, regrow
, sugEnvironment
) where
import Data.Maybe
import Control.Monad.Random
import Control.Monad.State.Strict
import FRP.BearRiver
import Common
import Discrete
import Model
------------------------------------------------------------------------------------------------------------------------
-- ENVIRONMENT-BEHAVIOUR
------------------------------------------------------------------------------------------------------------------------
-- | A cell is occupied exactly when it records an occupier.
cellOccupied :: SugEnvCell -> Bool
cellOccupied = isJust . sugEnvOccupier

-- | Complement of 'cellOccupied'.
cellUnoccupied :: SugEnvCell -> Bool
cellUnoccupied = isNothing . sugEnvOccupier
-- | Regrow sugar on every cell.  A negative rate means \"restore every
-- cell to its full capacity at once\"; otherwise each cell gains @rate@
-- units, capped at its capacity.
regrowSugar :: (MonadState SugEnvironment m)
            => Double
            -> m ()
regrowSugar rate
    | rate < 0 = regrowSugarToMax
    | otherwise = regrowSugarByRate
  where
    regrowSugarByRate :: (MonadState SugEnvironment m)
                      => m ()
    regrowSugarByRate
      = updateCellsM (\c ->
          c { sugEnvSugarLevel =
                min
                  (sugEnvSugarCapacity c)
                  ((sugEnvSugarLevel c) + rate)}) -- if this bracket is omitted it leads to a bug: all environment cells have +1 level

    regrowSugarToMax :: (MonadState SugEnvironment m) => m ()
    regrowSugarToMax = updateCellsM (\c -> c { sugEnvSugarLevel = sugEnvSugarCapacity c})
-- | Regrow sugar using the model's configured growback rate (see
-- 'regrowSugar' for the meaning of negative rates).
regrow :: (MonadState SugEnvironment m) => m ()
regrow = regrowSugar sugarGrowbackUnits
-- | The environment itself, modelled as an agent: every step it locks
-- the shared environment, reads it, applies sugar regrowth, writes the
-- result back and unlocks.  (This block also removes stray non-Haskell
-- text that had been fused onto its last line.)
sugEnvironment :: RandomGen g
               => SugAgent g
sugEnvironment = proc _ -> do
    arrM_ (lift lockEnvironment) -< ()
    env <- arrM_ (lift readEnvironment) -< ()
    (_, env') <- arrM (lift . runStateT regrow) -< env
    arrM (lift . writeEnvironment) -< env'
    arrM_ (lift unlockEnvironment) -< ()
    returnA -< agentOut
module HplAssets.Hephaestus.Parser.HephaestusParser where
import HplAssets.Hephaestus.Types
import BasicTypes
-- imports to function parserHephaestus
import Language.Haskell.Parser
-- import to function outputHephaestus
import Language.Haskell.Syntax
import Language.Haskell.Pretty
import Distribution.ModuleName
-- | Read and parse the Hephaestus base-product sources
-- (@BaseProduct.hs@ and @BaseProductTypes.hs@) into a 'HephaestusModel'.
--
-- NOTE(review): the @(ParseOk y)@ let-patterns are partial — if either
-- file fails to parse this raises an irrefutable-pattern error instead of
-- returning a failure value; consider matching 'ParseFailed' explicitly.
-- NOTE(review): lazy 'readFile' defers IO errors; a strict read would
-- surface them here.
parserHephaestus =
  do
   x <- readFile "HplProducts/BaseProduct.hs"
   let (ParseOk y) = parseModule x
   x2 <- readFile "HplProducts/BaseProductTypes.hs"
   let (ParseOk y2) = parseModule x2
   let hephModel = HephaestusModel [y, y2]
   return $ Success hephModel
-- | Pretty-print the two modules of a 'HephaestusModel' to files named
-- after each module's own module name ('toFilePath' turns dots into
-- directory separators).
--
-- NOTE(review): partial — only matches a model containing exactly two
-- modules; any other shape fails at runtime.
outputHephaestus:: HephaestusModel -> IO()
outputHephaestus (HephaestusModel [p1, p2]) = do
   let (HsModule _ (Module m) _ _ _) = p1
   writeFile (toFilePath (fromString m) ++ ".hs") (prettyPrint p1)
   -- this second binding shadows @m@ with the second module's name
   let (HsModule _ (Module m) _ _ _) = p2
   writeFile (toFilePath (fromString m) ++ ".hs") (prettyPrint p2)
   return ()
return () | alessandroleite/hephaestus-pl | src/meta-hephaestus/HplAssets/Hephaestus/Parser/HephaestusParser.hs | lgpl-3.0 | 958 | 0 | 13 | 196 | 295 | 146 | 149 | 22 | 1 |
{-# LANGUAGE RankNTypes #-}
{-# LANGUAGE TypeInType #-}
module Bug where
import Data.Kind
-- | A rank-1 polymorphic kind: for any kind @k1@, @k1 -> ty@.
type HRank1 ty = forall k1. k1 -> ty
-- | Structurally identical to 'HRank1', only the quantified variable's
-- name differs.
type HRank2 ty = forall k2. k2 -> ty
-- NOTE(review): the FAILS markers record which of these higher-rank kind
-- signatures the compiler rejected in this regression test.
data HREFL11 :: HRank1 (HRank1 Type) -- FAILS
data HREFL12 :: HRank1 (HRank2 Type)
data HREFL21 :: HRank2 (HRank1 Type)
data HREFL22 :: HRank2 (HRank2 Type) -- FAILS
| shlevy/ghc | testsuite/tests/polykinds/T14515.hs | bsd-3-clause | 333 | 0 | 7 | 62 | 105 | 62 | 43 | -1 | -1 |
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE ScopedTypeVariables #-}
-----------------------------------------------------------------------------
-- |
-- Module : Distribution.Simple.Configure
-- Copyright : Isaac Jones 2003-2005
-- License : BSD3
--
-- Maintainer : [email protected]
-- Portability : portable
--
-- This deals with the /configure/ phase. It provides the 'configure' action
-- which is given the package description and configure flags. It then tries
-- to: configure the compiler; resolves any conditionals in the package
-- description; resolve the package dependencies; check if all the extensions
-- used by this package are supported by the compiler; check that all the build
-- tools are available (including version checks if appropriate); checks for
-- any required @pkg-config@ packages (updating the 'BuildInfo' with the
-- results)
--
-- Then based on all this it saves the info in the 'LocalBuildInfo' and writes
-- it out to the @dist\/setup-config@ file. It also displays various details to
-- the user, the amount of information displayed depending on the verbosity
-- level.
module Distribution.Simple.Configure (configure,
writePersistBuildConfig,
getConfigStateFile,
getPersistBuildConfig,
checkPersistBuildConfigOutdated,
tryGetPersistBuildConfig,
maybeGetPersistBuildConfig,
findDistPref, findDistPrefOrDefault,
computeComponentId,
computeCompatPackageKey,
localBuildInfoFile,
getInstalledPackages,
getInstalledPackagesMonitorFiles,
getPackageDBContents,
configCompiler, configCompilerAux,
configCompilerEx, configCompilerAuxEx,
computeEffectiveProfiling,
ccLdOptionsBuildInfo,
checkForeignDeps,
interpretPackageDbFlags,
ConfigStateFileError(..),
tryGetConfigStateFile,
platformDefines,
relaxPackageDeps,
)
where
import Distribution.Compiler
import Distribution.Utils.NubList
import Distribution.Simple.Compiler hiding (Flag)
import Distribution.Simple.PreProcess
import Distribution.Package
import qualified Distribution.InstalledPackageInfo as Installed
import Distribution.InstalledPackageInfo (InstalledPackageInfo
,emptyInstalledPackageInfo)
import qualified Distribution.Simple.PackageIndex as PackageIndex
import Distribution.Simple.PackageIndex (InstalledPackageIndex)
import Distribution.PackageDescription as PD hiding (Flag)
import Distribution.ModuleName
import Distribution.PackageDescription.Configuration
import Distribution.PackageDescription.Check hiding (doesFileExist)
import Distribution.Simple.Program
import Distribution.Simple.Setup as Setup
import qualified Distribution.Simple.InstallDirs as InstallDirs
import Distribution.Simple.LocalBuildInfo
import Distribution.Simple.BuildPaths
import Distribution.Simple.Utils
import Distribution.System
import Distribution.Version
import Distribution.Verbosity
import qualified Distribution.Simple.GHC as GHC
import qualified Distribution.Simple.GHCJS as GHCJS
import qualified Distribution.Simple.JHC as JHC
import qualified Distribution.Simple.LHC as LHC
import qualified Distribution.Simple.UHC as UHC
import qualified Distribution.Simple.HaskellSuite as HaskellSuite
-- Prefer the more generic Data.Traversable.mapM to Prelude.mapM
import Prelude hiding ( mapM )
import Control.Exception
( Exception, evaluate, throw, throwIO, try )
import Control.Exception ( ErrorCall )
import Control.Monad
( liftM, when, unless, foldM, filterM, mplus )
import Distribution.Compat.Binary ( decodeOrFailIO, encode )
import GHC.Fingerprint ( Fingerprint(..), fingerprintString )
import Data.ByteString.Lazy (ByteString)
import qualified Data.ByteString as BS
import qualified Data.ByteString.Lazy.Char8 as BLC8
import Data.List
( (\\), nub, partition, isPrefixOf, inits, stripPrefix )
import Data.Maybe
( isNothing, catMaybes, fromMaybe, mapMaybe, isJust )
import Data.Either
( partitionEithers )
import qualified Data.Set as Set
import Data.Monoid as Mon ( Monoid(..) )
import qualified Data.Map as Map
import Data.Map (Map)
import Data.Traversable
( mapM )
import Data.Typeable
import Data.Char ( chr, isAlphaNum )
import Numeric ( showIntAtBase )
import System.Directory
( doesFileExist, createDirectoryIfMissing, getTemporaryDirectory )
import System.FilePath
( (</>), isAbsolute )
import qualified System.Info
( compilerName, compilerVersion )
import System.IO
( hPutStrLn, hClose )
import Distribution.Text
( Text(disp), defaultStyle, display, simpleParse )
import Text.PrettyPrint
( Doc, (<>), (<+>), ($+$), char, comma, empty, hsep, nest
, punctuate, quotes, render, renderStyle, sep, text )
import Distribution.Compat.Environment ( lookupEnv )
import Distribution.Compat.Exception ( catchExit, catchIO )
-- | The errors that can be thrown when reading the @setup-config@ file.
data ConfigStateFileError
    = ConfigStateFileNoHeader -- ^ No header found.
    | ConfigStateFileBadHeader -- ^ Incorrect header.
    | ConfigStateFileNoParse -- ^ Cannot parse file contents.
    | ConfigStateFileMissing -- ^ No file!
    | ConfigStateFileBadVersion PackageIdentifier PackageIdentifier
      (Either ConfigStateFileError LocalBuildInfo)
      -- ^ Mismatched version: carries the Cabal and compiler ids the file
      -- was written with, plus the result of trying to read it anyway.
  deriving (Typeable)
-- | Format a 'ConfigStateFileError' as a user-facing error message.
dispConfigStateFileError :: ConfigStateFileError -> Doc
dispConfigStateFileError ConfigStateFileNoHeader =
    text "Saved package config file header is missing."
    <+> text "Re-run the 'configure' command."
dispConfigStateFileError ConfigStateFileBadHeader =
    text "Saved package config file header is corrupt."
    <+> text "Re-run the 'configure' command."
dispConfigStateFileError ConfigStateFileNoParse =
    text "Saved package config file is corrupt."
    <+> text "Re-run the 'configure' command."
dispConfigStateFileError ConfigStateFileMissing =
    text "Run the 'configure' command first."
dispConfigStateFileError (ConfigStateFileBadVersion oldCabal oldCompiler _) =
    text "Saved package config file is outdated:"
    $+$ badCabal $+$ badCompiler
    $+$ text "Re-run the 'configure' command."
  where
    -- Always shown: which Cabal library version wrote the stale file.
    badCabal =
        text "• the Cabal version changed from"
        <+> disp oldCabal <+> "to" <+> disp currentCabalId
    -- Only mention the compiler when it actually differs.
    badCompiler
      | oldCompiler == currentCompilerId = empty
      | otherwise =
          text "• the compiler changed from"
          <+> disp oldCompiler <+> "to" <+> disp currentCompilerId
-- | Rendered via 'dispConfigStateFileError' so thrown errors read well.
instance Show ConfigStateFileError where
    show = renderStyle defaultStyle . dispConfigStateFileError
-- | Lets the error be thrown/caught with 'Control.Exception'.
instance Exception ConfigStateFileError
-- | Read the 'localBuildInfoFile'. Throw an exception if the file is
-- missing, if the file cannot be read, or if the file was created by an older
-- version of Cabal.
getConfigStateFile :: FilePath -- ^ The file path of the @setup-config@ file.
                   -> IO LocalBuildInfo
getConfigStateFile filename = do
    exists <- doesFileExist filename
    unless exists $ throwIO ConfigStateFileMissing
    -- Read the config file into a strict ByteString to avoid problems with
    -- lazy I/O, then convert to lazy because the binary package needs that.
    contents <- BS.readFile filename
    -- The first line is the human-readable header; the rest is binary.
    let (header, body) = BLC8.span (/='\n') (BLC8.fromChunks [contents])
    -- Force 'parseHeader' here so its 'error' calls surface as 'ErrorCall'
    -- and can be mapped to 'ConfigStateFileBadHeader' below.
    headerParseResult <- try $ evaluate $ parseHeader header
    let (cabalId, compId) =
          case headerParseResult of
            Left (_ :: ErrorCall) -> throw ConfigStateFileBadHeader
            Right x -> x
    -- Decode the binary body; a decode failure becomes NoParse.
    let getStoredValue = do
          result <- decodeOrFailIO (BLC8.tail body)
          case result of
            Left _ -> throw ConfigStateFileNoParse
            Right x -> return x
        -- If the file was written by a different Cabal version, still try
        -- to decode it, but report the version mismatch together with the
        -- decode outcome so callers can decide what to do.
        deferErrorIfBadVersion act
          | cabalId /= currentCabalId = do
              eResult <- try act
              throw $ ConfigStateFileBadVersion cabalId compId eResult
          | otherwise = act
    deferErrorIfBadVersion getStoredValue
-- | Read the 'localBuildInfoFile', returning either an error or the local build
-- info.
-- | Like 'getConfigStateFile' but captures any 'ConfigStateFileError'
-- in a 'Left' instead of throwing it.
tryGetConfigStateFile :: FilePath -- ^ The file path of the @setup-config@ file.
                      -> IO (Either ConfigStateFileError LocalBuildInfo)
tryGetConfigStateFile filename = try (getConfigStateFile filename)
-- | Try to read the 'localBuildInfoFile'.
-- | Like 'getPersistBuildConfig' but captures any 'ConfigStateFileError'
-- in a 'Left' instead of throwing it.
tryGetPersistBuildConfig :: FilePath -- ^ The @dist@ directory path.
                         -> IO (Either ConfigStateFileError LocalBuildInfo)
tryGetPersistBuildConfig distPref = try (getPersistBuildConfig distPref)
-- | Read the 'localBuildInfoFile'. Throw an exception if the file is
-- missing, if the file cannot be read, or if the file was created by an older
-- version of Cabal.
-- | Read the saved build configuration under the given @dist@ directory.
-- Throws a 'ConfigStateFileError' if the file is missing, unreadable, or
-- written by an older Cabal.
getPersistBuildConfig :: FilePath -- ^ The @dist@ directory path.
                      -> IO LocalBuildInfo
getPersistBuildConfig distPref =
    getConfigStateFile (localBuildInfoFile distPref)
-- | Try to read the 'localBuildInfoFile'.
-- | Try to read the saved build configuration; any read error collapses
-- to 'Nothing'.
maybeGetPersistBuildConfig :: FilePath -- ^ The @dist@ directory path.
                           -> IO (Maybe LocalBuildInfo)
maybeGetPersistBuildConfig distPref = do
    eLBI <- tryGetPersistBuildConfig distPref
    return $ either (const Nothing) Just eLBI
-- | After running configure, output the 'LocalBuildInfo' to the
-- 'localBuildInfoFile'.
writePersistBuildConfig :: FilePath -- ^ The @dist@ directory path.
                        -> LocalBuildInfo -- ^ The 'LocalBuildInfo' to write.
                        -> IO ()
writePersistBuildConfig distPref lbi = do
    createDirectoryIfMissing False distPref
    -- Atomic write so a concurrent reader never sees a half-written file;
    -- format is the one-line header followed by the binary-encoded lbi,
    -- matching what 'getConfigStateFile' expects.
    writeFileAtomic (localBuildInfoFile distPref) $
      BLC8.unlines [showHeader pkgId, encode lbi]
  where
    pkgId = packageId $ localPkgDescr lbi
-- | Identifier of the current Cabal package.
-- | Identifier of the current Cabal package: the fixed name @Cabal@
-- paired with the library's own 'cabalVersion' constant.
currentCabalId :: PackageIdentifier
currentCabalId = PackageIdentifier (PackageName "Cabal") cabalVersion
-- | Identifier of the current compiler package.
-- | Identifier of the compiler that built this library, taken from
-- 'System.Info' (i.e. the bootstrap compiler, not the one configured).
currentCompilerId :: PackageIdentifier
currentCompilerId = PackageIdentifier (PackageName System.Info.compilerName)
                                      System.Info.compilerVersion
-- | Parse the @setup-config@ file header, returning the package identifiers
-- for Cabal and the compiler.
parseHeader :: ByteString -- ^ The file contents.
            -> (PackageIdentifier, PackageIdentifier)
parseHeader header = case BLC8.words header of
  -- The header must have exactly the ten-word shape that 'showHeader'
  -- produces; anything else is NoHeader.
  ["Saved", "package", "config", "for", pkgId, "written", "by", cabalId,
   "using", compId] ->
      fromMaybe (throw ConfigStateFileBadHeader) $ do
          -- the package id is validated but not returned
          _ <- simpleParse (BLC8.unpack pkgId) :: Maybe PackageIdentifier
          cabalId' <- simpleParse (BLC8.unpack cabalId)
          compId' <- simpleParse (BLC8.unpack compId)
          return (cabalId', compId')
  _ -> throw ConfigStateFileNoHeader
-- | Generate the @setup-config@ file header.
-- | Generate the one-line @setup-config@ header; 'parseHeader' expects
-- exactly this word layout back.
showHeader :: PackageIdentifier -- ^ The processed package.
           -> ByteString
showHeader pkgId = BLC8.unwords $ map BLC8.pack
    [ "Saved", "package", "config", "for"
    , display pkgId
    , "written", "by"
    , display currentCabalId
    , "using"
    , display currentCompilerId
    ]
-- | Check that localBuildInfoFile is up-to-date with respect to the
-- .cabal file.
-- | True when the @.cabal@ file is newer than the saved @setup-config@,
-- i.e. the persisted configuration is stale and must be regenerated.
checkPersistBuildConfigOutdated :: FilePath -> FilePath -> IO Bool
checkPersistBuildConfigOutdated distPref pkg_descr_file =
    moreRecentFile pkg_descr_file (localBuildInfoFile distPref)
-- | Get the path of @dist\/setup-config@.
-- | Path of the persisted configuration: @\<dist\>\/setup-config@.
localBuildInfoFile :: FilePath -- ^ The @dist@ directory path.
                   -> FilePath
localBuildInfoFile = (</> "setup-config")
-- -----------------------------------------------------------------------------
-- * Configuration
-- -----------------------------------------------------------------------------
-- | Return the \"dist/\" prefix, or the default prefix. The prefix is taken
-- from (in order of highest to lowest preference) the override prefix, the
-- \"CABAL_BUILDDIR\" environment variable, or the default prefix.
findDistPref :: FilePath -- ^ default \"dist\" prefix
             -> Setup.Flag FilePath -- ^ override \"dist\" prefix
             -> IO FilePath
findDistPref defDistPref overrideDistPref = do
    envDistPref <- liftM parseEnvDistPref (lookupEnv "CABAL_BUILDDIR")
    -- 'mappend' on Flag keeps the right-hand (command-line) value when
    -- both are set, so the explicit override wins over the env var.
    return $ fromFlagOrDefault defDistPref (mappend envDistPref overrideDistPref)
  where
    -- An unset or empty CABAL_BUILDDIR is treated as "no preference".
    parseEnvDistPref env =
      case env of
        Just distPref | not (null distPref) -> toFlag distPref
        _ -> NoFlag
-- | Return the \"dist/\" prefix, or the default prefix. The prefix is taken
-- from (in order of highest to lowest preference) the override prefix, the
-- \"CABAL_BUILDDIR\" environment variable, or 'defaultDistPref' is used. Call
-- this function to resolve a @*DistPref@ flag whenever it is not known to be
-- set. (The @*DistPref@ flags are always set to a definite value before
-- invoking 'UserHooks'.)
-- | Resolve a @*DistPref@ flag against 'defaultDistPref' (see
-- 'findDistPref' for the precedence rules).
findDistPrefOrDefault :: Setup.Flag FilePath -- ^ override \"dist\" prefix
                      -> IO FilePath
findDistPrefOrDefault overridePref = findDistPref defaultDistPref overridePref
-- | Compute the effective value of the profiling flags
-- @--enable-library-profiling@ and @--enable-executable-profiling@
-- from the specified 'ConfigFlags'. This may be useful for
-- external Cabal tools which need to interact with Setup in
-- a backwards-compatible way: the most predictable mechanism
-- for enabling profiling across many legacy versions is to
-- NOT use @--enable-profiling@ and use those two flags instead.
--
-- Note that @--enable-executable-profiling@ also affects profiling
-- of benchmarks and (non-detailed) test suites.
-- | Resolve the profiling flags to an effective (library, executable)
-- pair: @--enable-profiling@ sets the default for both, which the
-- more specific @--library-profiling@ / @--executable-profiling@
-- flags may each override.
computeEffectiveProfiling :: ConfigFlags -> (Bool {- lib -}, Bool {- exe -})
computeEffectiveProfiling cfg = (profLib, profExe)
  where
    -- the shared default from --profiling (off when unset)
    profBoth = fromFlagOrDefault False (configProf cfg)
    profLib  = fromFlagOrDefault profBoth (configProfLib cfg)
    profExe  = fromFlagOrDefault profBoth (configProfExe cfg)
-- |Perform the \"@.\/setup configure@\" action.
-- Returns the @.setup-config@ file.
-- Overall flow: resolve flags/conditionals, pick a compiler, resolve
-- dependencies, sanity-check everything, then assemble the
-- 'LocalBuildInfo' that every later build step consumes.
configure :: (GenericPackageDescription, HookedBuildInfo)
          -> ConfigFlags -> IO LocalBuildInfo
configure (pkg_descr0', pbi) cfg = do
    let pkg_descr0 =
          -- Ignore '--allow-newer' when we're given '--exact-configuration'.
          if fromFlagOrDefault False (configExactConfiguration cfg)
          then pkg_descr0'
          else relaxPackageDeps
               (fromMaybe AllowNewerNone $ configAllowNewer cfg)
               pkg_descr0'
    setupMessage verbosity "Configuring" (packageId pkg_descr0)
    checkDeprecatedFlags verbosity cfg
    checkExactConfiguration pkg_descr0 cfg
    -- Where to build the package
    let buildDir :: FilePath -- e.g. dist/build
        -- fromFlag OK due to Distribution.Simple calling
        -- findDistPrefOrDefault to fill it in
        buildDir = fromFlag (configDistPref cfg) </> "build"
    createDirectoryIfMissingVerbose (lessVerbose verbosity) True buildDir
    -- What package database(s) to use
    let packageDbs
         = interpretPackageDbFlags
            (fromFlag (configUserInstall cfg))
            (configPackageDBs cfg)
    -- comp: the compiler we're building with
    -- compPlatform: the platform we're building for
    -- programsConfig: location and args of all programs we're
    --                 building with
    (comp, compPlatform, programsConfig)
        <- configCompilerEx
            (flagToMaybe (configHcFlavor cfg))
            (flagToMaybe (configHcPath cfg))
            (flagToMaybe (configHcPkg cfg))
            (mkProgramsConfig cfg (configPrograms cfg))
            (lessVerbose verbosity)
    -- The InstalledPackageIndex of all installed packages
    installedPackageSet <- getInstalledPackages (lessVerbose verbosity) comp
                              packageDbs programsConfig
    -- The InstalledPackageIndex of all (possible) internal packages
    let internalPackageSet = getInternalPackages pkg_descr0
    -- allConstraints: The set of all 'Dependency's we have. Used ONLY
    --                 to 'configureFinalizedPackage'.
    -- requiredDepsMap: A map from 'PackageName' to the specifically
    --                  required 'InstalledPackageInfo', due to --dependency
    --
    -- NB: These constraints are to be applied to ALL components of
    --     a package. Thus, it's not an error if allConstraints contains
    --     more constraints than is necessary for a component (another
    --     component might need it.)
    --
    -- NB: The fact that we bundle all the constraints together means
    --     that is not possible to configure a test-suite to use one
    --     version of a dependency, and the executable to use another.
    (allConstraints, requiredDepsMap) <- either die return $
      combinedConstraints (configConstraints cfg)
                          (configDependencies cfg)
                          installedPackageSet
    -- pkg_descr: The resolved package description, that does not contain any
    --            conditionals, because we have have an assignment for
    --            every flag, either picking them ourselves using a
    --            simple naive algorithm, or having them be passed to
    --            us by 'configConfigurationsFlags')
    -- flags: The 'FlagAssignment' that the conditionals were
    --        resolved with.
    --
    -- NB: Why doesn't finalizing a package also tell us what the
    --     dependencies are (e.g. when we run the naive algorithm,
    --     we are checking if dependencies are satisfiable)? The
    --     primary reason is that we may NOT have done any solving:
    --     if the flags are all chosen for us, this step is a simple
    --     matter of flattening according to that assignment. It's
    --     cleaner to then configure the dependencies afterwards.
    (pkg_descr, flags)
        <- configureFinalizedPackage verbosity cfg
            allConstraints
            (dependencySatisfiable
                (fromFlagOrDefault False (configExactConfiguration cfg))
                installedPackageSet
                internalPackageSet
                requiredDepsMap)
            comp
            compPlatform
            pkg_descr0
    checkCompilerProblems comp pkg_descr
    checkPackageProblems verbosity pkg_descr0
        (updatePackageDescription pbi pkg_descr)
    -- The list of 'InstalledPackageInfo' recording the selected
    -- dependencies...
    -- internalPkgDeps: ...on internal packages (these are fake!)
    -- externalPkgDeps: ...on external packages
    --
    -- Invariant: For any package name, there is at most one package
    -- in externalPackageDeps which has that name.
    --
    -- NB: The dependency selection is global over ALL components
    -- in the package (similar to how allConstraints and
    -- requiredDepsMap are global over all components). In particular,
    -- if *any* component (post-flag resolution) has an unsatisfiable
    -- dependency, we will fail. This can sometimes be undesirable
    -- for users, see #1786 (benchmark conflicts with executable),
    (internalPkgDeps, externalPkgDeps)
        <- configureDependencies
            verbosity
            internalPackageSet
            installedPackageSet
            requiredDepsMap
            pkg_descr
    let installDeps = Map.elems -- deduplicate
                    . Map.fromList
                    . map (\v -> (Installed.installedUnitId v, v))
                    $ externalPkgDeps
    -- Sanity check: every selected dependency must itself be intact in
    -- the installed package index.
    packageDependsIndex <-
      case PackageIndex.dependencyClosure installedPackageSet
                (map Installed.installedUnitId installDeps) of
        Left packageDependsIndex -> return packageDependsIndex
        Right broken ->
          die $ "The following installed packages are broken because other"
             ++ " packages they depend on are missing. These broken "
             ++ "packages must be rebuilt before they can be used.\n"
             ++ unlines [ "package "
                       ++ display (packageId pkg)
                       ++ " is broken due to missing package "
                       ++ intercalate ", " (map display deps)
                        | (pkg, deps) <- broken ]
    -- Insert a fake entry for the package being configured so we can
    -- detect inconsistent (multiple-version) dependency closures.
    let pseudoTopPkg = emptyInstalledPackageInfo {
            Installed.installedUnitId =
               mkLegacyUnitId (packageId pkg_descr),
            Installed.sourcePackageId = packageId pkg_descr,
            Installed.depends =
              map Installed.installedUnitId installDeps
          }
    case PackageIndex.dependencyInconsistencies
       . PackageIndex.insert pseudoTopPkg
       $ packageDependsIndex of
      [] -> return ()
      inconsistencies ->
        warn verbosity $
             "This package indirectly depends on multiple versions of the same "
          ++ "package. This is highly likely to cause a compile failure.\n"
          ++ unlines [ "package " ++ display pkg ++ " requires "
                    ++ display (PackageIdentifier name ver)
                     | (name, uses) <- inconsistencies
                     , (pkg, ver) <- uses ]
    -- installation directories
    defaultDirs <- defaultInstallDirs (compilerFlavor comp)
                   (fromFlag (configUserInstall cfg)) (hasLibs pkg_descr)
    let installDirs = combineInstallDirs fromFlagOrDefault
                      defaultDirs (configInstallDirs cfg)
    -- check languages and extensions
    let langlist = nub $ catMaybes $ map defaultLanguage
                   (allBuildInfo pkg_descr)
    let langs = unsupportedLanguages comp langlist
    when (not (null langs)) $
      die $ "The package " ++ display (packageId pkg_descr0)
         ++ " requires the following languages which are not "
         ++ "supported by " ++ display (compilerId comp) ++ ": "
         ++ intercalate ", " (map display langs)
    let extlist = nub $ concatMap allExtensions (allBuildInfo pkg_descr)
    let exts = unsupportedExtensions comp extlist
    when (not (null exts)) $
      die $ "The package " ++ display (packageId pkg_descr0)
         ++ " requires the following language extensions which are not "
         ++ "supported by " ++ display (compilerId comp) ++ ": "
         ++ intercalate ", " (map display exts)
    -- configured known/required programs & external build tools
    -- exclude build-tool deps on "internal" exes in the same package
    let requiredBuildTools =
          [ buildTool
          | let exeNames = map exeName (executables pkg_descr)
          , bi <- allBuildInfo pkg_descr
          , buildTool@(Dependency (PackageName toolName) reqVer)
            <- buildTools bi
          , let isInternal =
                    toolName `elem` exeNames
                    -- we assume all internal build-tools are
                    -- versioned with the package:
                 && packageVersion pkg_descr `withinRange` reqVer
          , not isInternal ]
    programsConfig' <-
          configureAllKnownPrograms (lessVerbose verbosity) programsConfig
      >>= configureRequiredPrograms verbosity requiredBuildTools
    (pkg_descr', programsConfig'') <-
      configurePkgconfigPackages verbosity pkg_descr programsConfig'
    -- internal component graph
    buildComponents <-
      case mkComponentsGraph pkg_descr internalPkgDeps of
        Left componentCycle -> reportComponentCycle componentCycle
        Right comps ->
          mkComponentsLocalBuildInfo cfg comp packageDependsIndex pkg_descr
                                     internalPkgDeps externalPkgDeps
                                     comps (configConfigurationsFlags cfg)
    -- Decide whether --enable-split-objs can be honoured for this compiler.
    split_objs <-
       if not (fromFlag $ configSplitObjs cfg)
            then return False
            else case compilerFlavor comp of
                        GHC | compilerVersion comp >= Version [6,5] []
                          -> return True
                        GHCJS
                          -> return True
                        _ -> do warn verbosity
                                     ("this compiler does not support " ++
                                      "--enable-split-objs; ignoring")
                                return False
    let ghciLibByDefault =
          case compilerId comp of
            CompilerId GHC _ ->
              -- If ghc is non-dynamic, then ghci needs object files,
              -- so we build one by default.
              --
              -- Technically, archive files should be sufficient for ghci,
              -- but because of GHC bug #8942, it has never been safe to
              -- rely on them. By the time that bug was fixed, ghci had
              -- been changed to read shared libraries instead of archive
              -- files (see next code block).
              not (GHC.isDynamic comp)
            CompilerId GHCJS _ ->
              not (GHCJS.isDynamic comp)
            _ -> False
    let sharedLibsByDefault
          | fromFlag (configDynExe cfg) =
              -- build a shared library if dynamically-linked
              -- executables are requested
              True
          | otherwise = case compilerId comp of
            CompilerId GHC _ ->
              -- if ghc is dynamic, then ghci needs a shared
              -- library, so we build one by default.
              GHC.isDynamic comp
            CompilerId GHCJS _ ->
              GHCJS.isDynamic comp
            _ -> False
        withSharedLib_ =
            -- build shared libraries if required by GHC or by the
            -- executable linking mode, but allow the user to force
            -- building only static library archives with
            -- --disable-shared.
            fromFlagOrDefault sharedLibsByDefault $ configSharedLib cfg
        withDynExe_ = fromFlag $ configDynExe cfg
    when (withDynExe_ && not withSharedLib_) $ warn verbosity $
           "Executables will use dynamic linking, but a shared library "
        ++ "is not being built. Linking will fail if any executables "
        ++ "depend on the library."
    let (profEnabledLib, profEnabledExe) = computeEffectiveProfiling cfg
    profDetailLibOnly <- checkProfDetail (configProfLibDetail cfg)
    profDetailBoth <- liftM (fromFlagOrDefault ProfDetailDefault)
                            (checkProfDetail (configProfDetail cfg))
    let profDetailLib = fromFlagOrDefault profDetailBoth profDetailLibOnly
        profDetailExe = profDetailBoth
    when (profEnabledExe && not profEnabledLib) $
      warn verbosity $
           "Executables will be built with profiling, but library "
        ++ "profiling is disabled. Linking will fail if any executables "
        ++ "depend on the library."
    let configCoverage_ =
          mappend (configCoverage cfg) (configLibCoverage cfg)
        cfg' = cfg { configCoverage = configCoverage_ }
    reloc <-
       if not (fromFlag $ configRelocatable cfg)
            then return False
            else return True
    -- Assemble the final 'LocalBuildInfo' record handed to every later
    -- build step.
    let lbi = LocalBuildInfo {
                configFlags = cfg',
                flagAssignment = flags,
                extraConfigArgs = [], -- Currently configure does not
                                      -- take extra args, but if it
                                      -- did they would go here.
                installDirTemplates = installDirs,
                compiler = comp,
                hostPlatform = compPlatform,
                buildDir = buildDir,
                componentsConfigs = buildComponents,
                installedPkgs = packageDependsIndex,
                pkgDescrFile = Nothing,
                localPkgDescr = pkg_descr',
                withPrograms = programsConfig'',
                withVanillaLib = fromFlag $ configVanillaLib cfg,
                withProfLib = profEnabledLib,
                withSharedLib = withSharedLib_,
                withDynExe = withDynExe_,
                withProfExe = profEnabledExe,
                withProfLibDetail = profDetailLib,
                withProfExeDetail = profDetailExe,
                withOptimization = fromFlag $ configOptimization cfg,
                withDebugInfo = fromFlag $ configDebugInfo cfg,
                withGHCiLib = fromFlagOrDefault ghciLibByDefault $
                              configGHCiLib cfg,
                splitObjs = split_objs,
                stripExes = fromFlag $ configStripExes cfg,
                stripLibs = fromFlag $ configStripLibs cfg,
                withPackageDB = packageDbs,
                progPrefix = fromFlag $ configProgPrefix cfg,
                progSuffix = fromFlag $ configProgSuffix cfg,
                relocatable = reloc
              }
    when reloc (checkRelocatable verbosity pkg_descr lbi)
    let dirs = absoluteInstallDirs pkg_descr lbi NoCopyDest
        relative = prefixRelativeInstallDirs (packageId pkg_descr) lbi
    unless (isAbsolute (prefix dirs)) $ die $
        "expected an absolute directory name for --prefix: " ++ prefix dirs
    -- Report what was decided at appropriate verbosity.
    info verbosity $ "Using " ++ display currentCabalId
                  ++ " compiled by " ++ display currentCompilerId
    info verbosity $ "Using compiler: " ++ showCompilerId comp
    info verbosity $ "Using install prefix: " ++ prefix dirs
    let dirinfo name dir isPrefixRelative =
          info verbosity $ name ++ " installed in: " ++ dir ++ relNote
          where relNote = case buildOS of
                  Windows | not (hasLibs pkg_descr)
                         && isNothing isPrefixRelative
                          -> " (fixed location)"
                  _ -> ""
    dirinfo "Binaries" (bindir dirs) (bindir relative)
    dirinfo "Libraries" (libdir dirs) (libdir relative)
    dirinfo "Dynamic libraries" (dynlibdir dirs) (dynlibdir relative)
    dirinfo "Private binaries" (libexecdir dirs) (libexecdir relative)
    dirinfo "Data files" (datadir dirs) (datadir relative)
    dirinfo "Documentation" (docdir dirs) (docdir relative)
    dirinfo "Configuration files" (sysconfdir dirs) (sysconfdir relative)
    sequence_ [ reportProgram verbosity prog configuredProg
              | (prog, configuredProg) <- knownPrograms programsConfig'' ]
    return lbi
    where
      verbosity = fromFlag (configVerbosity cfg)
      -- Normalise an unrecognised --profiling-detail value to the default,
      -- warning the user with the list of valid levels.
      checkProfDetail (Flag (ProfDetailOther other)) = do
        warn verbosity $
          "Unknown profiling detail level '" ++ other
          ++ "', using default.\n"
          ++ "The profiling detail levels are: " ++ intercalate ", "
          [ name | (name, _, _) <- knownProfDetailLevels ]
        return (Flag ProfDetailDefault)
      checkProfDetail other = return other
-- | Layer the user-supplied per-program arguments, per-program paths and
-- extra search directories from the 'ConfigFlags' on top of an initial
-- program configuration.
mkProgramsConfig :: ConfigFlags -> ProgramConfiguration -> ProgramConfiguration
mkProgramsConfig cfg initialProgramsConfig = programsConfig
  where
    programsConfig = userSpecifyArgss (configProgramArgs cfg)
                   . userSpecifyPaths (configProgramPaths cfg)
                   . setProgramSearchPath searchpath
                   $ initialProgramsConfig
    -- extra --program-path dirs are appended after the existing search path
    searchpath = getProgramSearchPath (initialProgramsConfig)
                 ++ map ProgramSearchPathDir
                    (fromNubList $ configProgramPathExtra cfg)
-- -----------------------------------------------------------------------------
-- Helper functions for configure
-- | Check if the user used any deprecated flags.
-- | Warn about deprecated flag spellings the user explicitly set,
-- pointing at the modern replacement. Whether the user enabled or
-- disabled the old flag is preserved in the suggestion.
checkDeprecatedFlags :: Verbosity -> ConfigFlags -> IO ()
checkDeprecatedFlags verbosity cfg = do
    unless (configProfExe cfg == NoFlag) $ do
      let enable = enableWord (configProfExe cfg)
      warn verbosity
        ("The flag --" ++ enable ++ "-executable-profiling is deprecated. "
         ++ "Please use --" ++ enable ++ "-profiling instead.")
    unless (configLibCoverage cfg == NoFlag) $ do
      let enable = enableWord (configLibCoverage cfg)
      warn verbosity
        ("The flag --" ++ enable ++ "-library-coverage is deprecated. "
         ++ "Please use --" ++ enable ++ "-coverage instead.")
  where
    -- "enable" or "disable", matching how the user set the boolean flag.
    enableWord flag | fromFlag flag = "enable"
                    | otherwise     = "disable"
-- | Sanity check: if '--exact-configuration' was given, ensure that the
-- complete flag assignment was specified on the command line.
-- | Sanity check: when @--exact-configuration@ was given, every flag
-- declared by the package must have been assigned on the command line;
-- otherwise die listing the missing ones.
checkExactConfiguration :: GenericPackageDescription -> ConfigFlags -> IO ()
checkExactConfiguration pkg_descr0 cfg =
  when (fromFlagOrDefault False (configExactConfiguration cfg)) $ do
    let cmdlineFlags = map fst (configConfigurationsFlags cfg)
        allFlags     = map flagName (genPackageFlags pkg_descr0)
        missingFlags = allFlags \\ cmdlineFlags
    unless (null missingFlags) $
      die $ "'--exact-configuration' was given, "
         ++ "but the following flags were not specified: "
         ++ intercalate ", " (map show missingFlags)
-- | Create a PackageIndex that makes *any libraries that might be*
-- defined internally to this package look like installed packages, in
-- case an executable should refer to any of them as dependencies.
--
-- It must be *any libraries that might be* defined rather than the
-- actual definitions, because these depend on conditionals in the .cabal
-- file, and we haven't resolved them yet. finalizePackageDescription
-- does the resolution of conditionals, and it takes internalPackageSet
-- as part of its input.
--
-- Currently a package can define no more than one library (which has
-- the same name as the package) but we could extend this later.
-- If we later allowed private internal libraries, then here we would
-- need to pre-scan the conditional data to make a list of all private
-- libraries that could possibly be defined by the .cabal file.
getInternalPackages :: GenericPackageDescription
                    -> InstalledPackageIndex
getInternalPackages pkg_descr0 =
    -- A single-entry index: the package's own (possible) library,
    -- presented as if it were already installed.
    let pid :: PackageIdentifier -- e.g. foo-0.1
        pid = packageId pkg_descr0
        internalPackage = emptyInstalledPackageInfo {
            --TODO: should use a per-compiler method to map the source
            --      package ID into an installed package id we can use
            --      for the internal package set. The use of
            --      mkLegacyUnitId here is a hack.
            Installed.installedUnitId = mkLegacyUnitId pid,
            Installed.sourcePackageId = pid
          }
    in PackageIndex.fromList [internalPackage]
-- | Returns true if a dependency is satisfiable. This is to be passed
-- to finalizePackageDescription.
dependencySatisfiable
    :: Bool
    -> InstalledPackageIndex -- ^ installed set
    -> InstalledPackageIndex -- ^ internal set
    -> Map PackageName InstalledPackageInfo -- ^ required dependencies
    -> (Dependency -> Bool)
dependencySatisfiable
    exact_config installedPackageSet internalPackageSet requiredDepsMap
    d@(Dependency depName _) =
    if exact_config
      -- With '--exact-configuration' all dependencies and flags are
      -- assumed to be exactly specified on the command line, so we only
      -- consult 'requiredDepsMap'.  No version-range check happens here;
      -- if something was missing from the command line,
      -- 'finalizePackageDescription' will fail later.
      --
      -- TODO: mention '--exact-configuration' in the error message
      -- when this fails?
      --
      -- (However, note that internal deps don't have to be
      -- specified!)
      then depName `Map.member` requiredDepsMap || satisfiedInternally
      -- Normal operation: just look up the dependency in the combined
      -- package index.
      else not (null (PackageIndex.lookupDependency combinedIndex d))
  where
    combinedIndex = PackageIndex.merge internalPackageSet installedPackageSet
    satisfiedInternally =
      not (null (PackageIndex.lookupDependency internalPackageSet d))
-- | Relax the dependencies of this package if needed.
relaxPackageDeps :: AllowNewer -> GenericPackageDescription
                 -> GenericPackageDescription
relaxPackageDeps AllowNewerNone gpd = gpd
relaxPackageDeps AllowNewerAll  gpd = transformAllBuildDepends dropUpper gpd
  where
    -- Strip the upper bound from every dependency unconditionally.
    dropUpper (Dependency pkgName verRange) =
      Dependency pkgName (removeUpperBound verRange)
relaxPackageDeps (AllowNewerSome allowNewerDeps') gpd =
    transformAllBuildDepends relaxSome gpd
  where
    thisPkgName    = packageName gpd
    allowNewerDeps = mapMaybe unscope allowNewerDeps'
    -- Keep unscoped entries; keep scoped entries only when the scope
    -- names this very package.
    unscope (Setup.AllowNewerDep p) = Just p
    unscope (Setup.AllowNewerDepScoped scope p)
      | scope == thisPkgName = Just p
      | otherwise            = Nothing
    relaxSome d@(Dependency depName verRange)
      | depName `elem` allowNewerDeps =
          Dependency depName (removeUpperBound verRange)
      | otherwise = d
-- | Finalize a generic package description. The workhorse is
-- 'finalizePackageDescription' but there's a bit of other nattering
-- about necessary.
--
-- TODO: what exactly is the business with @flaggedTests@ and
-- @flaggedBenchmarks@?
configureFinalizedPackage
    :: Verbosity
    -> ConfigFlags
    -> [Dependency]
    -> (Dependency -> Bool) -- ^ tests if a dependency is satisfiable.
                            -- Might say it's satisfiable even when not.
    -> Compiler
    -> Platform
    -> GenericPackageDescription
    -> IO (PackageDescription, FlagAssignment)
configureFinalizedPackage verbosity cfg
  allConstraints satisfies comp compPlatform pkg_descr0 = do
    -- Push the --enable-tests / --enable-benchmarks decisions down into
    -- every node of the conditional trees before resolving them.
    let enableTest t = t { testEnabled = fromFlag (configTests cfg) }
        flaggedTests = map (\(n, t) -> (n, mapTreeData enableTest t))
                       (condTestSuites pkg_descr0)
        enableBenchmark bm = bm { benchmarkEnabled =
                                     fromFlag (configBenchmarks cfg) }
        flaggedBenchmarks = map (\(n, bm) ->
                                  (n, mapTreeData enableBenchmark bm))
                            (condBenchmarks pkg_descr0)
        pkg_descr0'' = pkg_descr0 { condTestSuites = flaggedTests
                                  , condBenchmarks = flaggedBenchmarks }
    -- Resolve all conditionals against the flag assignment, platform,
    -- compiler, and the extra constraints; dies if some dependency is
    -- reported unsatisfiable.
    (pkg_descr0', flags) <-
            case finalizePackageDescription
                   (configConfigurationsFlags cfg)
                   satisfies
                   compPlatform
                   (compilerInfo comp)
                   allConstraints
                   pkg_descr0''
            of Right r -> return r
               Left missing ->
                   die $ "Encountered missing dependencies:\n"
                     ++ (render . nest 4 . sep . punctuate comma
                                . map (disp . simplifyDependency)
                                $ missing)
    -- add extra include/lib dirs as specified in cfg
    -- we do it here so that those get checked too
    let pkg_descr = addExtraIncludeLibDirs pkg_descr0'
    when (not (null flags)) $
      info verbosity $ "Flags chosen: "
                    ++ intercalate ", " [ name ++ "=" ++ display value
                                        | (FlagName name, value) <- flags ]
    return (pkg_descr, flags)
  where
    -- Append the user-supplied extra lib/framework/include dirs to the
    -- build info of the library and every executable.
    addExtraIncludeLibDirs pkg_descr =
        let extraBi = mempty { extraLibDirs = configExtraLibDirs cfg
                             , extraFrameworkDirs = configExtraFrameworkDirs cfg
                             , PD.includeDirs = configExtraIncludeDirs cfg}
            modifyLib l = l{ libBuildInfo = libBuildInfo l
                             `mappend` extraBi }
            modifyExecutable e = e{ buildInfo = buildInfo e
                                    `mappend` extraBi}
        in pkg_descr{ library = modifyLib `fmap` library pkg_descr
                    , executables = modifyExecutable `map`
                                      executables pkg_descr}
-- | Check for use of Cabal features which require compiler support
checkCompilerProblems :: Compiler -> PackageDescription -> IO ()
checkCompilerProblems comp pkg_descr = do
    -- Thinning/renaming of package flags requires compiler support.
    -- BUG FIX: the previous condition folded 'and' over a list of
    -- literal 'True's ('and [ True | bi <- ..., _ <- ... ]'), which is
    -- vacuously 'True' and never inspected the renamings, so the error
    -- below could never fire.  We must instead require that every
    -- renaming is the default one when the compiler lacks support.
    unless (renamingPackageFlagsSupported comp ||
                and [ rn == defaultRenaming
                    | bi <- allBuildInfo pkg_descr
                    , rn <- Map.elems (targetBuildRenaming bi)]) $
        die $ "Your compiler does not support thinning and renaming on "
           ++ "package flags. To use this feature you probably must use "
           ++ "GHC 7.9 or later."
    -- Module re-exports in the library section likewise need support.
    when (maybe False (not.null.PD.reexportedModules) (PD.library pkg_descr)
          && not (reexportedModulesSupported comp)) $ do
        die $ "Your compiler does not support module re-exports. To use "
           ++ "this feature you probably must use GHC 7.9 or later."
-- | Select dependencies for the package.
configureDependencies
    :: Verbosity
    -> InstalledPackageIndex -- ^ internal packages
    -> InstalledPackageIndex -- ^ installed packages
    -> Map PackageName InstalledPackageInfo -- ^ required deps
    -> PackageDescription
    -> IO ([PackageId], [InstalledPackageInfo])
configureDependencies verbosity
  internalPackageSet installedPackageSet requiredDepsMap pkg_descr = do
    -- Resolve each build-depends entry, splitting failures from
    -- successes via 'partitionEithers'.
    let selectDependencies :: [Dependency] ->
                              ([FailedDependency], [ResolvedDependency])
        selectDependencies =
            partitionEithers
          . map (selectDependency internalPackageSet installedPackageSet
                                  requiredDepsMap)
        (failedDeps, allPkgDeps) =
            selectDependencies (buildDepends pkg_descr)
        -- Split the resolved deps into internal (this package's own
        -- library) and external (installed packages).
        internalPkgDeps = [ pkgid
                          | InternalDependency _ pkgid <- allPkgDeps ]
        externalPkgDeps = [ pkg
                          | ExternalDependency _ pkg <- allPkgDeps ]
    -- Depending on an internal library requires cabal-version >= 1.8
    -- (per-target dependency behaviour).
    when (not (null internalPkgDeps)
          && not (newPackageDepsBehaviour pkg_descr)) $
        die $ "The field 'build-depends: "
           ++ intercalate ", " (map (display . packageName) internalPkgDeps)
           ++ "' refers to a library which is defined within the same "
           ++ "package. To use this feature the package must specify at "
           ++ "least 'cabal-version: >= 1.8'."
    reportFailedDependencies failedDeps
    reportSelectedDependencies verbosity allPkgDeps
    return (internalPkgDeps, externalPkgDeps)
-- -----------------------------------------------------------------------------
-- Configuring package dependencies
-- | Log whether (and where, and at which version) a program was found.
reportProgram :: Verbosity -> Program -> Maybe ConfiguredProgram -> IO ()
reportProgram verbosity prog mConfiguredProg =
    info verbosity (describe mConfiguredProg)
  where
    describe Nothing   = "No " ++ programName prog ++ " found"
    describe (Just cp) = "Using " ++ programName prog
                      ++ versionOf cp ++ locationOf cp
    versionOf cp  = maybe "" (\v -> " version " ++ display v)
                          (programVersion cp)
    locationOf cp = case programLocation cp of
      FoundOnSystem p -> " found on system at: " ++ p
      UserSpecified p -> " given by user at: " ++ p
-- | Base URL of Hackage, used to suggest where a missing dependency
-- can be downloaded from (a package name is appended to this prefix).
hackageUrl :: String
hackageUrl = "http://hackage.haskell.org/package/"
-- | A dependency 'selectDependency' managed to resolve: either an
-- installed package from the package index, or a library defined
-- inside the package currently being configured.
data ResolvedDependency = ExternalDependency Dependency InstalledPackageInfo
                        | InternalDependency Dependency PackageId -- should be a
                                                                  -- lib name
-- | Ways in which resolving a dependency can fail.
data FailedDependency = DependencyNotExists PackageName
                      | DependencyNoVersion Dependency
-- | Test for a package dependency and record the version we have installed.
selectDependency :: InstalledPackageIndex -- ^ Internally defined packages
                 -> InstalledPackageIndex -- ^ Installed packages
                 -> Map PackageName InstalledPackageInfo
                 -- ^ Packages for which we have been given specific deps to
                 -- use
                 -> Dependency
                 -> Either FailedDependency ResolvedDependency
selectDependency internalIndex installedIndex requiredDepsMap
  dep@(Dependency pkgname vr) =
  -- If the dependency specification matches anything in the internal package
  -- index, then we prefer that match to anything in the second.
  -- For example:
  --
  -- Name: MyLibrary
  -- Version: 0.1
  -- Library
  --     ..
  -- Executable my-exec
  --     build-depends: MyLibrary
  --
  -- We want "build-depends: MyLibrary" always to match the internal library
  -- even if there is a newer installed library "MyLibrary-0.2".
  -- However, "build-depends: MyLibrary >= 0.2" should match the installed one.
  --
  -- NOTE(review): the internal branch is taken only when the lookup
  -- yields exactly one package group with exactly one version in range.
  case PackageIndex.lookupPackageName internalIndex pkgname of
    [(_,[pkg])] | packageVersion pkg `withinRange` vr
           -> Right $ InternalDependency dep (packageId pkg)
    _ -> case Map.lookup pkgname requiredDepsMap of
      -- If we know the exact pkg to use, then use it.
      Just pkginstance -> Right (ExternalDependency dep pkginstance)
      -- Otherwise we just pick an arbitrary instance of the latest version.
      -- NOTE(review): 'last'/'head' look safe here assuming
      -- 'lookupDependency' groups by version with non-empty instance
      -- lists — TODO confirm against the PackageIndex contract.
      Nothing -> case PackageIndex.lookupDependency installedIndex dep of
        [] -> Left $ DependencyNotExists pkgname
        pkgs -> Right $ ExternalDependency dep $
                case last pkgs of
                  (_ver, pkginstances) -> head pkginstances
-- | Log one line per resolved dependency saying which package was picked.
reportSelectedDependencies :: Verbosity
                           -> [ResolvedDependency] -> IO ()
reportSelectedDependencies verbosity deps =
    info verbosity (unlines (map describe deps))
  where
    describe resolved =
        "Dependency " ++ display (simplifyDependency dep)
          ++ ": using " ++ display pkgid
      where
        (dep, pkgid) = case resolved of
          ExternalDependency dep' pkg'   -> (dep', packageId pkg')
          InternalDependency dep' pkgid' -> (dep', pkgid')
-- | Die with one message per failed dependency; no-op when none failed.
reportFailedDependencies :: [FailedDependency] -> IO ()
reportFailedDependencies [] = return ()
reportFailedDependencies failed =
    die $ intercalate "\n\n" [ describeFailure f | f <- failed ]
  where
    describeFailure f = case f of
      DependencyNotExists pkgname ->
           "there is no version of " ++ display pkgname ++ " installed.\n"
        ++ "Perhaps you need to download and install it from\n"
        ++ hackageUrl ++ display pkgname ++ "?"
      DependencyNoVersion dep ->
        "cannot satisfy dependency " ++ display (simplifyDependency dep) ++ "\n"
-- | List all installed packages in the given package databases.
getInstalledPackages :: Verbosity -> Compiler
                     -> PackageDBStack -- ^ The stack of package databases.
                     -> ProgramConfiguration
                     -> IO InstalledPackageIndex
getInstalledPackages verbosity comp packageDBs progconf = do
  -- An empty stack usually means the user said --package-db=clear and
  -- never supplied a replacement database.
  when (null packageDBs) $
    die $ "No package databases have been specified. If you use "
       ++ "--package-db=clear, you must follow it with --package-db= "
       ++ "with 'global', 'user' or a specific file."
  info verbosity "Reading installed packages..."
  -- Dispatch to the per-compiler implementation; note GHC and UHC also
  -- need the 'Compiler' value itself.
  case compilerFlavor comp of
    GHC -> GHC.getInstalledPackages verbosity comp packageDBs progconf
    GHCJS -> GHCJS.getInstalledPackages verbosity packageDBs progconf
    JHC -> JHC.getInstalledPackages verbosity packageDBs progconf
    LHC -> LHC.getInstalledPackages verbosity packageDBs progconf
    UHC -> UHC.getInstalledPackages verbosity comp packageDBs progconf
    HaskellSuite {} ->
      HaskellSuite.getInstalledPackages verbosity packageDBs progconf
    flv -> die $ "don't know how to find the installed packages for "
              ++ display flv
-- | Like 'getInstalledPackages', but for a single package DB.
--
-- NB: Why isn't this always a fall through to 'getInstalledPackages'?
-- That is because 'getInstalledPackages' performs some sanity checks
-- on the package database stack in question. However, when sandboxes
-- are involved these sanity checks are not desirable.
getPackageDBContents :: Verbosity -> Compiler
                     -> PackageDB -> ProgramConfiguration
                     -> IO InstalledPackageIndex
getPackageDBContents verbosity comp packageDB progconf = do
  info verbosity "Reading installed packages..."
  -- Only GHC and GHCJS have a dedicated single-DB query.
  case compilerFlavor comp of
    GHC -> GHC.getPackageDBContents verbosity packageDB progconf
    GHCJS -> GHCJS.getPackageDBContents verbosity packageDB progconf
    -- For other compilers, try to fall back on 'getInstalledPackages'.
    _ -> getInstalledPackages verbosity comp [packageDB] progconf
-- | A set of files (or directories) that can be monitored to detect when
-- there might have been a change in the installed packages.
--
getInstalledPackagesMonitorFiles :: Verbosity -> Compiler
                                 -> PackageDBStack
                                 -> ProgramConfiguration -> Platform
                                 -> IO [FilePath]
getInstalledPackagesMonitorFiles verbosity comp packageDBs progconf platform =
  case compilerFlavor comp of
    GHC -> GHC.getInstalledPackagesMonitorFiles
             verbosity platform progconf packageDBs
    -- For non-GHC compilers we warn and monitor nothing, so changes to
    -- their package databases will not be detected.
    other -> do
      warn verbosity $ "don't know how to find change monitoring files for "
                    ++ "the installed package databases for " ++ display other
      return []
-- | The user interface specifies the package dbs to use with a combination of
-- @--global@, @--user@ and @--package-db=global|user|clear|$file@.
-- This function combines the global/user flag and interprets the package-db
-- flag into a single package db stack.
--
interpretPackageDbFlags :: Bool -> [Maybe PackageDB] -> PackageDBStack
interpretPackageDbFlags userInstall specificDBs =
    foldl apply initialStack specificDBs
  where
    initialStack
      | userInstall = [GlobalPackageDB, UserPackageDB]
      | otherwise   = [GlobalPackageDB]
    -- 'Nothing' represents @--package-db=clear@ and resets the stack
    -- accumulated so far; 'Just db' appends one more database.
    apply _    Nothing   = []
    apply dbs' (Just db) = dbs' ++ [db]
-- | Minimum spec version (cabal-version 1.7.1) at which per-target
-- dependency behaviour is enabled; see 'newPackageDepsBehaviour'.
newPackageDepsBehaviourMinVersion :: Version
newPackageDepsBehaviourMinVersion = Version [1,7,1] []
-- In older cabal versions, there was only one set of package dependencies for
-- the whole package. In this version, we can have separate dependencies per
-- target, but we only enable this behaviour if the minimum cabal version
-- specified is >= a certain minimum. Otherwise, for compatibility we use the
-- old behaviour.
newPackageDepsBehaviour :: PackageDescription -> Bool
newPackageDepsBehaviour =
    (>= newPackageDepsBehaviourMinVersion) . specVersion
-- We are given both --constraint="foo < 2.0" style constraints and also
-- specific packages to pick via --dependency="foo=foo-2.0-177d5cdf20962d0581".
--
-- When finalising the package we have to take into account the specific
-- installed deps we've been given, and the finalise function expects
-- constraints, so we have to translate these deps into version constraints.
--
-- But after finalising we then have to make sure we pick the right specific
-- deps in the end. So we still need to remember which installed packages to
-- pick.
combinedConstraints :: [Dependency] ->
                       [(PackageName, UnitId)] ->
                       InstalledPackageIndex ->
                       Either String ([Dependency],
                                      Map PackageName InstalledPackageInfo)
-- NOTE: this 'do' block runs in the @Either String@ monad, so the first
-- failing 'when'-check short-circuits with its rendered error message.
combinedConstraints constraints dependencies installedPackages = do
    when (not (null badUnitIds)) $
      Left $ render $ text "The following package dependencies were requested"
         $+$ nest 4 (dispDependencies badUnitIds)
         $+$ text "however the given installed package instance does not exist."
    when (not (null badNames)) $
      Left $ render $ text "The following package dependencies were requested"
         $+$ nest 4 (dispDependencies badNames)
         $+$ text ("however the installed package's name does not match "
                   ++ "the name given.")
    --TODO: we don't check that all dependencies are used!
    return (allConstraints, idConstraintMap)
  where
    -- The --constraint flags plus an exact-version constraint for every
    -- --dependency that resolved to an installed package.
    allConstraints :: [Dependency]
    allConstraints = constraints
                  ++ [ thisPackageVersion (packageId pkg)
                     | (_, _, Just pkg) <- dependenciesPkgInfo ]
    -- Remember exactly which installed instance to use per package name.
    idConstraintMap :: Map PackageName InstalledPackageInfo
    idConstraintMap = Map.fromList
                        [ (packageName pkg, pkg)
                        | (_, _, Just pkg) <- dependenciesPkgInfo ]
    -- The dependencies along with the installed package info, if it exists
    dependenciesPkgInfo :: [(PackageName, UnitId,
                             Maybe InstalledPackageInfo)]
    dependenciesPkgInfo =
      [ (pkgname, ipkgid, mpkg)
      | (pkgname, ipkgid) <- dependencies
      , let mpkg = PackageIndex.lookupUnitId
                     installedPackages ipkgid
      ]
    -- If we looked up a package specified by an installed package id
    -- (i.e. someone has written a hash) and didn't find it then it's
    -- an error.
    badUnitIds =
      [ (pkgname, ipkgid)
      | (pkgname, ipkgid, Nothing) <- dependenciesPkgInfo ]
    -- If someone has written e.g.
    -- --dependency="foo=MyOtherLib-1.0-07...5bf30" then they have
    -- probably made a mistake.
    badNames =
      [ (requestedPkgName, ipkgid)
      | (requestedPkgName, ipkgid, Just pkg) <- dependenciesPkgInfo
      , let foundPkgName = packageName pkg
      , requestedPkgName /= foundPkgName ]
    dispDependencies deps =
      hsep [ text "--dependency="
             <> quotes (disp pkgname <> char '=' <> disp ipkgid)
           | (pkgname, ipkgid) <- deps ]
-- -----------------------------------------------------------------------------
-- Configuring program dependencies
-- | Configure every build-tool dependency in turn, threading the
-- program database through the fold.
configureRequiredPrograms :: Verbosity -> [Dependency] -> ProgramConfiguration
                             -> IO ProgramConfiguration
configureRequiredPrograms verbosity deps conf0 =
    foldM configureOne conf0 deps
  where
    configureOne db dep = configureRequiredProgram verbosity db dep
configureRequiredProgram :: Verbosity -> ProgramConfiguration -> Dependency
                            -> IO ProgramConfiguration
configureRequiredProgram verbosity conf
    (Dependency (PackageName progName) verRange) =
    maybe unknown configure (lookupKnownProgram progName conf)
  where
    unknown = die ("Unknown build tool " ++ progName)
    -- requireProgramVersion always requires the program have a version
    -- but if the user says "build-depends: foo" ie no version constraint
    -- then we should not fail if we cannot discover the program version.
    configure prog
      | verRange == anyVersion =
          fmap snd (requireProgram verbosity prog conf)
      | otherwise = do
          (_, _, conf') <- requireProgramVersion verbosity prog verRange conf
          return conf'
-- -----------------------------------------------------------------------------
-- Configuring pkg-config package dependencies
-- | Check all pkgconfig-depends entries against the system's pkg-config,
-- and fold the resulting compiler/linker flags into each component's
-- build info.
configurePkgconfigPackages :: Verbosity -> PackageDescription
                           -> ProgramConfiguration
                           -> IO (PackageDescription, ProgramConfiguration)
configurePkgconfigPackages verbosity pkg_descr conf
  | null allpkgs = return (pkg_descr, conf)
  | otherwise    = do
    -- pkg-config 0.9.0 or later is needed for --modversion etc.
    (_, _, conf') <- requireProgramVersion
                       (lessVerbose verbosity) pkgConfigProgram
                       (orLaterVersion $ Version [0,9,0] []) conf
    mapM_ requirePkg allpkgs
    lib'     <- mapM addPkgConfigBILib (library pkg_descr)
    exes'    <- mapM addPkgConfigBIExe (executables pkg_descr)
    tests'   <- mapM addPkgConfigBITest (testSuites pkg_descr)
    benches' <- mapM addPkgConfigBIBench (benchmarks pkg_descr)
    let pkg_descr' = pkg_descr { library = lib', executables = exes',
                                 testSuites = tests', benchmarks = benches' }
    return (pkg_descr', conf')
  where
    allpkgs = concatMap pkgconfigDepends (allBuildInfo pkg_descr)
    -- Run pkg-config with the given arguments and capture stdout.
    pkgconfig = rawSystemProgramStdoutConf (lessVerbose verbosity)
                  pkgConfigProgram conf
    -- Die unless the named pkg-config package exists and its version
    -- lies within the requested range.
    requirePkg dep@(Dependency (PackageName pkg) range) = do
      version <- pkgconfig ["--modversion", pkg]
                 `catchIO`   (\_ -> die notFound)
                 `catchExit` (\_ -> die notFound)
      case simpleParse version of
        Nothing -> die "parsing output of pkg-config --modversion failed"
        Just v | not (withinRange v range) -> die (badVersion v)
               | otherwise                 -> info verbosity (depSatisfied v)
      where
        notFound     = "The pkg-config package '" ++ pkg ++ "'"
                    ++ versionRequirement
                    ++ " is required but it could not be found."
        badVersion v = "The pkg-config package '" ++ pkg ++ "'"
                    ++ versionRequirement
                    ++ " is required but the version installed on the"
                    ++ " system is version " ++ display v
        depSatisfied v = "Dependency " ++ display dep
                      ++ ": using version " ++ display v
        versionRequirement
          | isAnyVersion range = ""
          | otherwise          = " version " ++ display range
    -- Adds pkgconfig dependencies to the build info for a component
    addPkgConfigBI compBI setCompBI comp = do
      bi <- pkgconfigBuildInfo (pkgconfigDepends (compBI comp))
      return $ setCompBI comp (compBI comp `mappend` bi)
    -- Adds pkgconfig dependencies to the build info for a library
    addPkgConfigBILib = addPkgConfigBI libBuildInfo $
                          \lib bi -> lib { libBuildInfo = bi }
    -- Adds pkgconfig dependencies to the build info for an executable
    addPkgConfigBIExe = addPkgConfigBI buildInfo $
                          \exe bi -> exe { buildInfo = bi }
    -- Adds pkgconfig dependencies to the build info for a test suite
    addPkgConfigBITest = addPkgConfigBI testBuildInfo $
                          \test bi -> test { testBuildInfo = bi }
    -- Adds pkgconfig dependencies to the build info for a benchmark
    addPkgConfigBIBench = addPkgConfigBI benchmarkBuildInfo $
                          \bench bi -> bench { benchmarkBuildInfo = bi }
    -- Query pkg-config for the combined cflags/libs of the given deps
    -- and turn them into a 'BuildInfo'.
    pkgconfigBuildInfo :: [Dependency] -> IO BuildInfo
    pkgconfigBuildInfo []      = return Mon.mempty
    pkgconfigBuildInfo pkgdeps = do
      let pkgs = nub [ display pkg | Dependency pkg _ <- pkgdeps ]
      ccflags <- pkgconfig ("--cflags" : pkgs)
      ldflags <- pkgconfig ("--libs"   : pkgs)
      return (ccLdOptionsBuildInfo (words ccflags) (words ldflags))
-- | Makes a 'BuildInfo' from C compiler and linker flags.
--
-- This can be used with the output from configuration programs like pkg-config
-- and similar package-specific programs like mysql-config, freealut-config etc.
-- For example:
--
-- > ccflags <- rawSystemProgramStdoutConf verbosity prog conf ["--cflags"]
-- > ldflags <- rawSystemProgramStdoutConf verbosity prog conf ["--libs"]
-- > return (ccldOptionsBuildInfo (words ccflags) (words ldflags))
--
ccLdOptionsBuildInfo :: [String] -> [String] -> BuildInfo
ccLdOptionsBuildInfo cflags ldflags =
    mempty {
      PD.includeDirs  = map (drop 2) includeDirs',
      PD.extraLibs    = map (drop 2) extraLibs',
      PD.extraLibDirs = map (drop 2) extraLibDirs',
      PD.ccOptions    = cflags',
      PD.ldOptions    = ldflags''
    }
  where
    -- "-Idir" flags name include directories; the rest are plain cc opts.
    (includeDirs',  cflags')   = partition ("-I" `isPrefixOf`) cflags
    -- "-lfoo" names libraries, "-Ldir" names lib dirs; the rest are
    -- plain linker options.
    (extraLibs',    ldflags')  = partition ("-l" `isPrefixOf`) ldflags
    (extraLibDirs', ldflags'') = partition ("-L" `isPrefixOf`) ldflags'
-- -----------------------------------------------------------------------------
-- Determining the compiler details
-- | Resolve the compiler, target platform and program database from the
-- configure flags, delegating to 'configCompilerEx'.
configCompilerAuxEx :: ConfigFlags
                    -> IO (Compiler, Platform, ProgramConfiguration)
configCompilerAuxEx cfg = configCompilerEx (flagToMaybe $ configHcFlavor cfg)
                                           (flagToMaybe $ configHcPath cfg)
                                           (flagToMaybe $ configHcPkg cfg)
                                           programsConfig
                                           (fromFlag (configVerbosity cfg))
  where
    programsConfig = mkProgramsConfig cfg defaultProgramConfiguration
configCompilerEx :: Maybe CompilerFlavor -> Maybe FilePath -> Maybe FilePath
                 -> ProgramConfiguration -> Verbosity
                 -> IO (Compiler, Platform, ProgramConfiguration)
configCompilerEx Nothing _ _ _ _ = die "Unknown compiler"
configCompilerEx (Just hcFlavor) hcPath hcPkg conf verbosity = do
  (comp, maybePlatform, programsConfig) <- case hcFlavor of
    GHC -> GHC.configure verbosity hcPath hcPkg conf
    GHCJS -> GHCJS.configure verbosity hcPath hcPkg conf
    JHC -> JHC.configure verbosity hcPath hcPkg conf
    -- LHC piggy-backs on a GHC configuration for its backend.
    LHC -> do (_, _, ghcConf) <- GHC.configure verbosity Nothing hcPkg conf
              LHC.configure verbosity hcPath Nothing ghcConf
    UHC -> UHC.configure verbosity hcPath hcPkg conf
    HaskellSuite {} -> HaskellSuite.configure verbosity hcPath hcPkg conf
    _ -> die "Unknown compiler"
  -- Fall back to the build platform when the backend reports none.
  return (comp, fromMaybe buildPlatform maybePlatform, programsConfig)
-- Ideally we would like to not have separate configCompiler* and
-- configCompiler*Ex sets of functions, but there are many custom setup scripts
-- in the wild that are using them, so the versions with old types are kept for
-- backwards compatibility. Platform was added to the return triple in 1.18.
{-# DEPRECATED configCompiler
    "'configCompiler' is deprecated. Use 'configCompilerEx' instead." #-}
-- | Backwards-compatible wrapper around 'configCompilerEx' that drops
-- the 'Platform' from the result.
configCompiler :: Maybe CompilerFlavor -> Maybe FilePath -> Maybe FilePath
               -> ProgramConfiguration -> Verbosity
               -> IO (Compiler, ProgramConfiguration)
configCompiler mFlavor hcPath hcPkg conf verbosity =
  fmap (\(a,_,b) -> (a,b)) $ configCompilerEx mFlavor hcPath hcPkg conf verbosity
{-# DEPRECATED configCompilerAux
    "configCompilerAux is deprecated. Use 'configCompilerAuxEx' instead." #-}
-- | Backwards-compatible wrapper around 'configCompilerAuxEx' that
-- drops the 'Platform' from the result.
configCompilerAux :: ConfigFlags
                  -> IO (Compiler, ProgramConfiguration)
configCompilerAux cfg = do
    (comp, _platform, progconf) <- configCompilerAuxEx cfg
    return (comp, progconf)
-- -----------------------------------------------------------------------------
-- Making the internal component graph
mkComponentsGraph :: PackageDescription
                  -> [PackageId]
                  -> Either [ComponentName]
                            [(Component, [ComponentName])]
-- Returns 'Left' the cycle's component names if the dependency graph is
-- cyclic, otherwise 'Right' each enabled component with its deps.
mkComponentsGraph pkg_descr internalPkgDeps =
    let graph = [ (c, componentName c, componentDeps c)
                | c <- pkgEnabledComponents pkg_descr ]
    in case checkComponentsCyclic graph of
      Just ccycle -> Left [ cname | (_,cname,_) <- ccycle ]
      Nothing -> Right [ (c, cdeps) | (c, _, cdeps) <- graph ]
  where
    -- The dependencies for the given component: build-tools that name
    -- one of this package's own executables, plus the internal library
    -- when it appears in build-depends.
    componentDeps component =
         [ CExeName toolname | Dependency (PackageName toolname) _
                               <- buildTools bi
                             , toolname `elem` map exeName
                               (executables pkg_descr) ]
      ++ [ CLibName | Dependency pkgname _ <- targetBuildDepends bi
                    , pkgname `elem` map packageName internalPkgDeps ]
      where
        bi = componentBuildInfo component
-- | Die with a readable description of a component dependency cycle.
reportComponentCycle :: [ComponentName] -> IO a
reportComponentCycle cnames =
    die $ "Components in the package depend on each other in a cyclic way:\n "
       ++ intercalate " depends on " quotedNames
  where
    -- Repeat the first component at the end so the cycle reads as
    -- closed; callers pass a non-empty cycle, so 'head' is safe here.
    quotedNames = [ "'" ++ showComponentName cname ++ "'"
                  | cname <- cnames ++ [head cnames] ]
-- | This method computes a default, "good enough" 'ComponentId'
-- for a package. The intent is that cabal-install (or the user) will
-- specify a more detailed IPID via the @--ipid@ flag if necessary.
computeComponentId :: PackageIdentifier
                   -> ComponentName
                   -- TODO: careful here!
                   -> [ComponentId] -- IPIDs of the component dependencies
                   -> FlagAssignment
                   -> ComponentId
computeComponentId pid cname dep_ipids flagAssignment =
    ComponentId (display pid ++ "-" ++ hash ++ suffix)
  where
    -- 'show' is faster than intercalating with a separator and then
    -- replacing the separator.  We must not simply concatenate the
    -- nested list either, since flattening would let different sources
    -- collide to the same hash.  For safety the package id itself is
    -- included too, since for GHC 7.10 just the hash is used as the
    -- package key.
    hash = hashToBase62 (display pid
                         ++ show dep_ipids
                         ++ show flagAssignment)
    suffix = case cname of
      CLibName     -> ""
      -- TODO: these could result in non-parseable IPIDs
      -- since the component name format is very flexible
      CExeName s   -> "-" ++ s ++ ".exe"
      CTestName s  -> "-" ++ s ++ ".test"
      CBenchName s -> "-" ++ s ++ ".bench"
-- | Hash a string with GHC's fingerprinting and render the two 64-bit
-- words in base 62 (digits, then upper case, then lower case letters).
hashToBase62 :: String -> String
hashToBase62 s = render (fingerprintString s)
  where
    render (Fingerprint w1 w2) = inBase62 w1 ++ inBase62 w2
    inBase62 w = showIntAtBase 62 digitChar w ""
    digitChar d
      | d < 10    = chr (d + 48)        -- '0'..'9'
      | d < 36    = chr (d - 10 + 65)   -- 'A'..'Z'
      | d < 62    = chr (d - 36 + 97)   -- 'a'..'z'
      | otherwise = '@'                 -- unreachable for base-62 digits
-- | In GHC 8.0, the string we pass to GHC to use for symbol
-- names for a package can be an arbitrary, IPID-compatible string.
-- However, prior to GHC 8.0 there are some restrictions on what
-- format this string can be (due to how ghc-pkg parsed the key):
--
-- 1. In GHC 7.10, the string had to be either of the form
-- foo_ABCD, where foo is a non-semantic alphanumeric/hyphenated
-- prefix and ABCD is two base-64 encoded 64-bit integers,
-- or a GHC 7.8 style identifier.
--
-- 2. In GHC 7.8, the string had to be a valid package identifier
-- like foo-0.1.
--
-- So, the problem is that Cabal, in general, has a general IPID,
-- but needs to figure out a package key / package ID that the
-- old ghc-pkg will actually accept. But there's an EVEN WORSE
-- problem: if ghc-pkg decides to parse an identifier foo-0.1-xxx
-- as if it were a package identifier, which means it will SILENTLY
-- DROP the "xxx" (because it's a tag, and Cabal does not allow tags.)
-- So we must CONNIVE to ensure that we don't pick something that
-- looks like this.
--
-- So this function attempts to define a mapping into the old formats.
--
-- The mapping for GHC 7.8 and before:
--
-- * For CLibName, we unconditionally use the 'PackageIdentifier'.
--
-- * For sub-components, we create a new 'PackageIdentifier' which
-- is encoded in the following way. The test suite "qux" in package
-- "foobar-0.2" gets this package identifier "z-foobar-z-test-qux-0.2".
-- These package IDs have the form:
--
-- cpid ::= "z-" package-id "-z-" component-type "-" component-name
-- component-type ::= "test" | "bench" | "exe" | "lib"
-- package-id and component-name have "-" ( "z" + ) "-"
-- segments encoded by adding an extra "z".
--
-- The mapping for GHC 7.10:
--
-- * For CLibName:
-- If the IPID is of the form foo-0.1-ABCDEF where foo_ABCDEF would
-- validly parse as a package key, we pass "ABCDEF". (NB: not
-- all hashes parse this way, because GHC 7.10 mandated that
-- these hashes be two base-62 encoded 64 bit integers),
-- but hashes that Cabal generated using 'computeComponentId'
-- are guaranteed to have this form.
--
-- If it is not of this form, we rehash the IPID into the
-- correct form and pass that.
--
-- * For sub-components, we rehash the IPID into the correct format
-- and pass that.
--
computeCompatPackageKey
    :: Compiler
    -> PackageIdentifier
    -> ComponentName
    -> UnitId
    -> (PackageName, String)
computeCompatPackageKey comp pid cname uid@(SimpleUnitId (ComponentId str))
    -- Pre-7.10 compilers: encode the component into a fake package id
    -- using the "z-escaping" scheme described in the comment above.
    | not (packageKeySupported comp || unitIdSupported comp) =
        -- NB: the package ID in the database entry has to follow this
        let zdashcode s = go s (Nothing :: Maybe Int) []
                where go [] _ r = reverse r
                      go ('-':z) (Just n) r | n > 0 = go z (Just 0) ('-':'z':r)
                      go ('-':z) _ r = go z (Just 0) ('-':r)
                      go ('z':z) (Just n) r = go z (Just (n+1)) ('z':r)
                      go (c:z) _ r = go z Nothing (c:r)
            cname_str = case cname of
                            -- CLibName never reaches this branch (see
                            -- the guards on package_name below).
                            CLibName -> error "computeCompatPackageKey"
                            CTestName n -> "-z-test-" ++ zdashcode n
                            CBenchName n -> "-z-bench-" ++ zdashcode n
                            CExeName n -> "-z-exe-" ++ zdashcode n
            package_name
                | cname == CLibName = pkgName pid
                | otherwise = PackageName $ "z-"
                              ++ zdashcode (display (pkgName pid))
                              ++ zdashcode cname_str
            old_style_key
                | cname == CLibName = display pid
                | otherwise = display package_name ++ "-"
                              ++ display (pkgVersion pid)
        in (package_name, old_style_key)
    -- GHC 7.10: pass the hash part verbatim when it already has a shape
    -- ghc-pkg will accept; otherwise rehash the IPID into base 62.
    | not (unifiedIPIDRequired comp) =
        let mb_verbatim_key
                = case simpleParse str :: Maybe PackageId of
                    -- Something like 'foo-0.1', use it verbatim.
                    -- (NB: hash tags look like tags, so they are parsed,
                    -- so the extra equality check tests if a tag was dropped.)
                    Just pid0 | display pid0 == str -> Just str
                    _ -> Nothing
            mb_truncated_key
                = let cand = reverse (takeWhile isAlphaNum (reverse str))
                  in if length cand == 22 && all isAlphaNum cand
                        then Just cand
                        else Nothing
            rehashed_key = hashToBase62 str
        in (pkgName pid, fromMaybe rehashed_key
                            (mb_verbatim_key `mplus` mb_truncated_key))
    -- GHC 8.0 and later accept the IPID directly.
    | otherwise = (pkgName pid, display uid)
-- | Build one 'ComponentLocalBuildInfo' per component in the graph,
-- computing the library's component id / compatibility package key
-- first so internal dependencies can refer to it.
mkComponentsLocalBuildInfo :: ConfigFlags
                           -> Compiler
                           -> InstalledPackageIndex
                           -> PackageDescription
                           -> [PackageId] -- internal package deps
                           -> [InstalledPackageInfo] -- external package deps
                           -> [(Component, [ComponentName])]
                           -> FlagAssignment
                           -> IO [(ComponentName, ComponentLocalBuildInfo,
                                   [ComponentName])]
mkComponentsLocalBuildInfo cfg comp installedPackages pkg_descr
                           internalPkgDeps externalPkgDeps
                           graph flagAssignment = do
    -- Pre-compute library hash so we can setup internal deps
    -- TODO configIPID should have name changed
    let cid = case configIPID cfg of
                Flag cid0 ->
                    -- Hack to reuse install dirs machinery
                    -- NB: no real IPID available at this point
                    let env = packageTemplateEnv (package pkg_descr)
                                                 (mkUnitId "")
                        str = fromPathTemplate
                                (InstallDirs.substPathTemplate env
                                 (toPathTemplate cid0))
                    in ComponentId str
                _ ->
                  computeComponentId (package pkg_descr) CLibName
                  (getDeps CLibName) flagAssignment
        uid = SimpleUnitId cid
        (_, compat_key) = computeCompatPackageKey comp
                          (package pkg_descr) CLibName uid
    sequence
      [ do clbi <- componentLocalBuildInfo uid compat_key c
           return (componentName c, clbi, cdeps)
      | (c, cdeps) <- graph ]
  where
    -- Component ids of the external packages a named component uses.
    getDeps cname =
          let externalPkgs = maybe [] (\lib -> selectSubset
                                               (componentBuildInfo lib)
                                               externalPkgDeps)
                             (lookupComponent pkg_descr cname)
          in map Installed.installedComponentId externalPkgs
    -- The allPkgDeps contains all the package deps for the whole package
    -- but we need to select the subset for this specific component.
    -- we just take the subset for the package names this component
    -- needs. Note, this only works because we cannot yet depend on two
    -- versions of the same package.
    componentLocalBuildInfo uid compat_key component =
      case component of
      CLib lib -> do
        let exports = map (\n -> Installed.ExposedModule n Nothing)
                          (PD.exposedModules lib)
        let mb_reexports = resolveModuleReexports installedPackages
                                                  (packageId pkg_descr)
                                                  uid
                                                  externalPkgDeps lib
        reexports <- case mb_reexports of
          Left problems -> reportModuleReexportProblems problems
          Right r -> return r
        return LibComponentLocalBuildInfo {
          componentPackageDeps = cpds,
          componentUnitId = uid,
          componentCompatPackageKey = compat_key,
          componentPackageRenaming = cprns,
          componentExposedModules = exports ++ reexports
        }
      CExe _ ->
        return ExeComponentLocalBuildInfo {
          componentPackageDeps = cpds,
          componentPackageRenaming = cprns
        }
      CTest _ ->
        return TestComponentLocalBuildInfo {
          componentPackageDeps = cpds,
          componentPackageRenaming = cprns
        }
      CBench _ ->
        return BenchComponentLocalBuildInfo {
          componentPackageDeps = cpds,
          componentPackageRenaming = cprns
        }
      where
        bi = componentBuildInfo component
        -- De-duplicate by unit id (first component of the pair).
        dedup = Map.toList . Map.fromList
        cpds = if newPackageDepsBehaviour pkg_descr
               then dedup $
                    [ (Installed.installedUnitId pkg, packageId pkg)
                    | pkg <- selectSubset bi externalPkgDeps ]
                 ++ [ (uid, pkgid)
                    | pkgid <- selectSubset bi internalPkgDeps ]
               else [ (Installed.installedUnitId pkg, packageId pkg)
                    | pkg <- externalPkgDeps ]
        cprns = if newPackageDepsBehaviour pkg_descr
                then targetBuildRenaming bi
                -- Hack: if we have old package-deps behavior, it's impossible
                -- for non-default renamings to be used, because the Cabal
                -- version is too early. This is a good, because while all the
                -- deps were bundled up in buildDepends, we didn't do this for
                -- renamings, so it's not even clear how to get the merged
                -- version. So just assume that all of them are the default..
                else Map.fromList (map (\(_,pid) ->
                                         (packageName pid, defaultRenaming)) cpds)
    selectSubset :: Package pkg => BuildInfo -> [pkg] -> [pkg]
    selectSubset bi pkgs =
        [ pkg | pkg <- pkgs, packageName pkg `elem` names bi ]
    names bi = [ name | Dependency name _ <- targetBuildDepends bi ]
-- | Given the author-specified re-export declarations from the .cabal file,
-- resolve them to the form that we need for the package database.
--
-- An invariant of the package database is that we always link the re-export
-- directly to its original defining location (rather than indirectly via a
-- chain of re-exporting packages).
--
resolveModuleReexports :: InstalledPackageIndex
                       -> PackageId
                       -> UnitId
                       -> [InstalledPackageInfo]
                       -> Library
                       -> Either [(ModuleReexport, String)] -- errors
                                 [Installed.ExposedModule] -- ok
resolveModuleReexports installedPackages srcpkgid key externalPkgDeps lib =
    -- Resolve each declared re-export independently; any failure makes
    -- the whole result a Left with all the collected problems.
    case partitionEithers
           (map resolveModuleReexport (PD.reexportedModules lib)) of
      ([], ok)  -> Right ok
      (errs, _) -> Left errs
  where
    -- A mapping from visible module names to their original defining
    -- module name. We also record the package name of the package which
    -- *immediately* provided the module (not the original) to handle if the
    -- user explicitly says which build-depends they want to reexport from.
    visibleModules :: Map ModuleName [(PackageName, Installed.ExposedModule)]
    visibleModules =
      Map.fromListWith (++) $
           [ (Installed.exposedName exposedModule, [(exportingPackageName,
                                                     exposedModule)])
             -- The package index here contains all the indirect deps of the
             -- package we're configuring, but we want just the direct deps
           | let directDeps = Set.fromList
                                (map Installed.installedUnitId externalPkgDeps)
           , pkg <- PackageIndex.allPackages installedPackages
           , Installed.installedUnitId pkg `Set.member` directDeps
           , let exportingPackageName = packageName pkg
           , exposedModule <- visibleModuleDetails pkg
           ]
        ++ [ (visibleModuleName, [(exportingPackageName, exposedModule)])
             -- Modules defined (or listed as other-modules) in this very
             -- package; their original location is this package's unit-id.
           | visibleModuleName <- PD.exposedModules lib
                               ++ otherModules (libBuildInfo lib)
           , let exportingPackageName = packageName srcpkgid
                 definingModuleName = visibleModuleName
                 definingPackageId = key
                 originalModule = Installed.OriginalModule definingPackageId
                                                           definingModuleName
                 exposedModule = Installed.ExposedModule visibleModuleName
                                                         (Just originalModule)
           ]

    -- All the modules exported from this package and their defining name and
    -- package (either defined here in this package or re-exported from some
    -- other package). Return an ExposedModule because we want to hold onto
    -- signature information.
    visibleModuleDetails :: InstalledPackageInfo -> [Installed.ExposedModule]
    visibleModuleDetails pkg = do
      exposedModule <- Installed.exposedModules pkg
      case Installed.exposedReexport exposedModule of
        -- The first case is the modules actually defined in this package.
        -- In this case the reexport will point to this package.
        Nothing -> return exposedModule {
                     Installed.exposedReexport =
                       Just (Installed.OriginalModule
                              (Installed.installedUnitId pkg)
                              (Installed.exposedName exposedModule)) }
        -- On the other hand, a visible module might actually be itself
        -- a re-export! In this case, the re-export info for the package
        -- doing the re-export will point us to the original defining
        -- module name and package, so we can reuse the entry.
        Just _ -> return exposedModule

    resolveModuleReexport reexport@ModuleReexport {
         moduleReexportOriginalPackage = moriginalPackageName,
         moduleReexportOriginalName    = originalName,
         moduleReexportName            = newName
      } =
      let -- If the user qualified by a package name, restrict the
          -- candidates to that immediate provider.
          filterForSpecificPackage =
            case moriginalPackageName of
              Nothing -> id
              Just originalPackageName ->
                filter (\(pkgname, _) -> pkgname == originalPackageName)
          matches = filterForSpecificPackage
                      (Map.findWithDefault [] originalName visibleModules)
      in
      case (matches, moriginalPackageName) of
        ((_, exposedModule):rest, _)
          -- TODO: Refine this check for signatures
          -- Multiple providers are fine if they all trace back to the
          -- same original module.
          | all (\(_, exposedModule') ->
                   Installed.exposedReexport exposedModule
                == Installed.exposedReexport exposedModule') rest
          -> Right exposedModule { Installed.exposedName = newName }

        ([], Just originalPackageName)
          -> Left $ (,) reexport
                  $ "The package " ++ display originalPackageName
                 ++ " does not export a module " ++ display originalName

        ([], Nothing)
          -> Left $ (,) reexport
                  $ "The module " ++ display originalName
                 ++ " is not exported by any suitable package (this package "
                 ++ "itself nor any of its 'build-depends' dependencies)."

        (ms, _)
          -> Left $ (,) reexport
                  $ "The module " ++ display originalName ++ " is exported "
                 ++ "by more than one package ("
                 ++ intercalate ", " [ display pkgname | (pkgname,_) <- ms ]
                 ++ ") and so the re-export is ambiguous. The ambiguity can "
                 ++ "be resolved by qualifying by the package name. The "
                 ++ "syntax is 'packagename:moduleName [as newname]'."

        -- Note: if in future Cabal allows directly depending on multiple
        -- instances of the same package (e.g. backpack) then an additional
        -- ambiguity case is possible here: (_, Just originalPackageName)
        -- with the module being ambiguous despite being qualified by a
        -- package name. Presumably by that time we'll have a mechanism to
        -- qualify the instance we're referring to.
-- | Abort with one line per module re-export problem.  Never returns
-- (the 'die' call terminates), hence the fully polymorphic result type.
reportModuleReexportProblems :: [(ModuleReexport, String)] -> IO a
reportModuleReexportProblems reexportProblems =
    die (unlines (map formatProblem reexportProblems))
  where
    formatProblem (reexport, msg) =
      "Problem with the module re-export '" ++ display reexport ++ "': " ++ msg
-- -----------------------------------------------------------------------------
-- Testing C lib and header dependencies

-- Try to build a test C program which includes every header and links every
-- lib. If that fails, try to narrow it down by preprocessing (only) and linking
-- with individual headers and libs. If none is the obvious culprit then give a
-- generic error message.
-- TODO: produce a log file from the compiler errors, if any.
checkForeignDeps :: PackageDescription -> LocalBuildInfo -> Verbosity -> IO ()
checkForeignDeps pkg lbi verbosity = do
  -- First try everything at once; only on failure narrow the culprit down.
  ifBuildsWith allHeaders (commonCcArgs ++ makeLdArgs allLibs) -- I'm feeling
                                                               -- lucky
    (return ())
    (do missingLibs <- findMissingLibs
        missingHdr  <- findOffendingHdr
        explainErrors missingHdr missingLibs)
  where
    allHeaders = collectField PD.includes
    allLibs    = collectField PD.extraLibs

    -- Run 'success' if a probe program with the given headers builds
    -- under the given compiler args, otherwise 'failure'.
    ifBuildsWith headers args success failure = do
      ok <- builds (makeProgram headers) args
      if ok then success else failure

    -- Bisect over growing prefixes of the header list; 'Left' means the
    -- header fails even to preprocess, 'Right' means it preprocesses but
    -- does not compile.
    findOffendingHdr =
        ifBuildsWith allHeaders ccArgs
                     (return Nothing)
                     (go . tail . inits $ allHeaders)
      where
        go [] = return Nothing -- cannot happen
        go (hdrs:hdrsInits) =
          -- Try just preprocessing first
          ifBuildsWith hdrs cppArgs
            -- If that works, try compiling too
            (ifBuildsWith hdrs ccArgs
               (go hdrsInits)
               (return . Just . Right . last $ hdrs))
            (return . Just . Left . last $ hdrs)

        cppArgs = "-E":commonCppArgs -- preprocess only
        ccArgs  = "-c":commonCcArgs  -- don't try to link

    -- Which of the extra-libs fail to link individually?
    findMissingLibs = ifBuildsWith [] (makeLdArgs allLibs)
                                   (return [])
                                   (filterM (fmap not . libExists) allLibs)

    libExists lib = builds (makeProgram []) (makeLdArgs [lib])

    commonCppArgs = platformDefines lbi
                 ++ [ "-I" ++ autogenModulesDir lbi ]
                 ++ [ "-I" ++ dir | dir <- collectField PD.includeDirs ]
                 ++ ["-I."]
                 ++ collectField PD.cppOptions
                 ++ collectField PD.ccOptions
                 ++ [ "-I" ++ dir
                    | dep <- deps
                    , dir <- Installed.includeDirs dep ]
                 ++ [ opt
                    | dep <- deps
                    , opt <- Installed.ccOptions dep ]

    commonCcArgs  = commonCppArgs
                 ++ collectField PD.ccOptions
                 ++ [ opt
                    | dep <- deps
                    , opt <- Installed.ccOptions dep ]

    commonLdArgs  = [ "-L" ++ dir | dir <- collectField PD.extraLibDirs ]
                 ++ collectField PD.ldOptions
                 ++ [ "-L" ++ dir
                    | dep <- deps
                    , dir <- Installed.libraryDirs dep ]
                 --TODO: do we also need dependent packages' ld options?
    makeLdArgs libs = [ "-l"++lib | lib <- libs ] ++ commonLdArgs

    -- Minimal C program that includes the given headers.
    makeProgram hdrs = unlines $
                       [ "#include \"" ++ hdr ++ "\"" | hdr <- hdrs ] ++
                       ["int main(int argc, char** argv) { return 0; }"]

    collectField f = concatMap f allBi
    allBi = allBuildInfo pkg
    deps = PackageIndex.topologicalOrder (installedPkgs lbi)

    -- Try to compile+link the given program text with gcc; any IO error
    -- or non-zero exit is reported as a plain False.
    builds program args = do
        tempDir <- getTemporaryDirectory
        withTempFile tempDir ".c" $ \cName cHnd ->
          withTempFile tempDir "" $ \oNname oHnd -> do
            hPutStrLn cHnd program
            hClose cHnd
            hClose oHnd
            _ <- rawSystemProgramStdoutConf verbosity
                   gccProgram (withPrograms lbi) (cName:"-o":oNname:args)
            return True
      `catchIO`   (\_ -> return False)
      `catchExit` (\_ -> return False)

    explainErrors Nothing [] = return () -- should be impossible!
    explainErrors _ _
      -- No gcc at all: everything would have failed, so say so.
      | isNothing . lookupProgram gccProgram . withPrograms $ lbi
      = die $ unlines $
           [ "No working gcc",
             "This package depends on foreign library but we cannot "
          ++ "find a working C compiler. If you have it in a "
          ++ "non-standard location you can use the --with-gcc "
          ++ "flag to specify it." ]
    explainErrors hdr libs = die $ unlines $
         [ if plural
             then "Missing dependencies on foreign libraries:"
             else "Missing dependency on a foreign library:"
         | missing ]
      ++ case hdr of
           Just (Left h) -> ["* Missing (or bad) header file: " ++ h ]
           _             -> []
      ++ case libs of
           []    -> []
           [lib] -> ["* Missing C library: " ++ lib]
           _     -> ["* Missing C libraries: " ++ intercalate ", " libs]
      ++ [if plural then messagePlural else messageSingular | missing]
      ++ case hdr of
           Just (Left _)  -> [ headerCppMessage ]
           Just (Right h) -> [ (if missing then "* " else "")
                            ++ "Bad header file: " ++ h
                             , headerCcMessage ]
           _              -> []
      where
        plural = length libs >= 2
        -- Is there something missing? (as opposed to broken)
        missing = not (null libs)
               || case hdr of Just (Left _) -> True; _ -> False

        messageSingular =
             "This problem can usually be solved by installing the system "
          ++ "package that provides this library (you may need the "
          ++ "\"-dev\" version). If the library is already installed "
          ++ "but in a non-standard location then you can use the flags "
          ++ "--extra-include-dirs= and --extra-lib-dirs= to specify "
          ++ "where it is."
        messagePlural =
             "This problem can usually be solved by installing the system "
          ++ "packages that provide these libraries (you may need the "
          ++ "\"-dev\" versions). If the libraries are already installed "
          ++ "but in a non-standard location then you can use the flags "
          ++ "--extra-include-dirs= and --extra-lib-dirs= to specify "
          ++ "where they are."
        headerCppMessage =
             "If the header file does exist, it may contain errors that "
          ++ "are caught by the C compiler at the preprocessing stage. "
          ++ "In this case you can re-run configure with the verbosity "
          ++ "flag -v3 to see the error messages."
        headerCcMessage =
             "The header file contains a compile error. "
          ++ "You can re-run configure with the verbosity flag "
          ++ "-v3 to see the error messages from the C compiler."
-- | Output package check warnings and errors. Exit if any errors.
checkPackageProblems :: Verbosity
                     -> GenericPackageDescription
                     -> PackageDescription
                     -> IO ()
checkPackageProblems verbosity gpkg pkg = do
    ioChecks <- checkPackageFiles pkg "."
    let allChecks = checkPackage gpkg (Just pkg) ++ ioChecks
        errors    = [ e | PackageBuildImpossible e <- allChecks ]
        warnings  = [ w | PackageBuildWarning w    <- allChecks ]
    case errors of
      -- No fatal problems: just emit the warnings.
      [] -> mapM_ (warn verbosity) warnings
      -- At least one fatal problem: abort with all of them.
      _  -> die (intercalate "\n\n" errors)
-- | Preform checks if a relocatable build is allowed
checkRelocatable :: Verbosity
                 -> PackageDescription
                 -> LocalBuildInfo
                 -> IO ()
checkRelocatable verbosity pkg lbi
    = sequence_ [ checkOS
                , checkCompiler
                , packagePrefixRelative
                , depsPrefixRelative
                ]
  where
    -- Check if the OS support relocatable builds.
    --
    -- If you add new OS' to this list, and your OS supports dynamic libraries
    -- and RPATH, make sure you add your OS to RPATH-support list of:
    -- Distribution.Simple.GHC.getRPaths
    checkOS
        = unless (os `elem` [ OSX, Linux ])
        $ die $ "Operating system: " ++ display os ++
                ", does not support relocatable builds"
      where
        (Platform _ os) = hostPlatform lbi

    -- Check if the Compiler support relocatable builds
    checkCompiler
        = unless (compilerFlavor comp `elem` [ GHC ])
        $ die $ "Compiler: " ++ show comp ++
                ", does not support relocatable builds"
      where
        comp = compiler lbi

    -- Check if all the install dirs are relative to same prefix
    packagePrefixRelative
        = unless (relativeInstallDirs installDirs)
        $ die $ "Installation directories are not prefix_relative:\n" ++
                show installDirs
      where
        installDirs = absoluteInstallDirs pkg lbi NoCopyDest
        p = prefix installDirs
        -- Every install dir must literally start with the prefix
        -- (checked via stripPrefix on the rendered paths).
        relativeInstallDirs (InstallDirs {..}) =
          all isJust
              (fmap (stripPrefix p)
                    [ bindir, libdir, dynlibdir, libexecdir, includedir, datadir
                    , docdir, mandir, htmldir, haddockdir, sysconfdir] )

    -- Check if the library dirs of the dependencies that are in the package
    -- database to which the package is installed are relative to the
    -- prefix of the package
    depsPrefixRelative = do
        pkgr <- GHC.pkgRoot verbosity lbi (last (withPackageDB lbi))
        mapM_ (doCheck pkgr) ipkgs
      where
        -- Only deps living in the same package root are checked;
        -- others are ignored.
        doCheck pkgr ipkg
          | maybe False (== pkgr) (Installed.pkgRoot ipkg)
          = mapM_ (\l -> when (isNothing $ stripPrefix p l) (die (msg l)))
                  (Installed.libraryDirs ipkg)
          | otherwise
          = return ()
        installDirs = absoluteInstallDirs pkg lbi NoCopyDest
        p = prefix installDirs
        ipkgs = PackageIndex.allPackages (installedPkgs lbi)
        msg l = "Library directory of a dependency: " ++ show l ++
                "\nis not relative to the installation prefix:\n" ++
                show p
| tolysz/prepare-ghcjs | spec-lts8/cabal/Cabal/Distribution/Simple/Configure.hs | bsd-3-clause | 94,037 | 16 | 24 | 28,828 | 15,541 | 8,121 | 7,420 | 1,360 | 15 |
{-# Language PatternGuards #-}
module Blub
( blub
, foo
, bar
) where
import Ugah.Foo
import Control.Applicative
import Ugah.Blub (a, b, c,
d, e, f)
-- | Add three, written pointed rather than as an operator section.
f :: Int -> Int
f n = n + 3
g :: Int
g =
where
| jystic/hsimport | tests/inputFiles/SymbolTest27.hs | bsd-3-clause | 228 | 0 | 5 | 75 | 79 | 50 | 29 | -1 | -1 |
{-# LANGUAGE CPP #-}
-----------------------------------------------------------------------------
-- |
-- License : BSD-3-Clause
-- Maintainer : Oleg Grenrus <[email protected]>
--
-- The repo commits API as described on
-- <http://developer.github.com/v3/repos/commits/>.
module GitHub.Endpoints.Repos.Commits (
CommitQueryOption(..),
commitsForR,
commitsWithOptionsForR,
commitR,
diffR,
module GitHub.Data,
) where
import GitHub.Data
import GitHub.Internal.Prelude
import Prelude ()
import qualified Data.ByteString as BS
import qualified Data.Text as T
import qualified Data.Text.Encoding as TE
-- | Turn one 'CommitQueryOption' into the query-string key\/value pair
-- expected by the GitHub commits endpoint.
renderCommitQueryOption :: CommitQueryOption -> (BS.ByteString, Maybe BS.ByteString)
renderCommitQueryOption opt = case opt of
    CommitQuerySha sha       -> ("sha",    textValue sha)
    CommitQueryPath path     -> ("path",   textValue path)
    CommitQueryAuthor author -> ("author", textValue author)
    CommitQuerySince date    -> ("since",  dateValue date)
    CommitQueryUntil date    -> ("until",  dateValue date)
  where
    -- Plain text values are UTF-8 encoded as-is.
    textValue = Just . TE.encodeUtf8
    -- Dates are rendered in ISO-8601 before encoding.
    dateValue = Just . TE.encodeUtf8 . T.pack . formatISO8601
-- | List commits on a repository.
-- See <https://developer.github.com/v3/repos/commits/#list-commits-on-a-repository>
commitsForR :: Name Owner -> Name Repo -> FetchCount -> Request k (Vector Commit)
-- Convenience wrapper: same as 'commitsWithOptionsForR' with no options.
commitsForR user repo limit = commitsWithOptionsForR user repo limit []
-- | List commits on a repository.
-- See <https://developer.github.com/v3/repos/commits/#list-commits-on-a-repository>
commitsWithOptionsForR :: Name Owner -> Name Repo -> FetchCount -> [CommitQueryOption] -> Request k (Vector Commit)
commitsWithOptionsForR user repo limit opts =
    pagedQuery ["repos", toPathPart user, toPathPart repo, "commits"] qs limit
  where
    -- Each option becomes one query-string key/value pair.
    qs = map renderCommitQueryOption opts
-- | Query a single commit.
-- See <https://developer.github.com/v3/repos/commits/#get-a-single-commit>
commitR :: Name Owner -> Name Repo -> Name Commit -> Request k Commit
commitR user repo sha =
    -- GET /repos/:owner/:repo/commits/:sha with no query parameters.
    query ["repos", toPathPart user, toPathPart repo, "commits", toPathPart sha] []
-- | Compare two commits.
-- See <https://developer.github.com/v3/repos/commits/#compare-two-commits>
diffR :: Name Owner -> Name Repo -> Name Commit -> Name Commit -> Request k Diff
diffR user repo base headref =
    -- The endpoint uses a single "base...head" path segment.
    query ["repos", toPathPart user, toPathPart repo, "compare", toPathPart base <> "..." <> toPathPart headref] []
| jwiegley/github | src/GitHub/Endpoints/Repos/Commits.hs | bsd-3-clause | 2,535 | 0 | 11 | 374 | 598 | 322 | 276 | 32 | 1 |
module Main where
import qualified Codec.Archive.Tar.Index as Index
import qualified Codec.Archive.Tar.Index.IntTrie as IntTrie
import qualified Codec.Archive.Tar.Index.StringTable as StringTable
import Test.Tasty
import Test.Tasty.QuickCheck
-- | Run the tar test suite: the same three tasty groups as before,
-- just named locally for readability.
main :: IO ()
main = defaultMain (testGroup "tar tests" allGroups)
  where
    allGroups = [stringTableTests, intTrieTests, indexTests]

    stringTableTests =
      testGroup "string table"
        [ testProperty "construction and lookup" StringTable.prop_valid ]

    intTrieTests =
      testGroup "int trie"
        [ testProperty "unit 1" IntTrie.test1
        , testProperty "unit 2" IntTrie.test2
        , testProperty "unit 3" IntTrie.test3
        , testProperty "lookups" IntTrie.prop_lookup_mono
        , testProperty "completions" IntTrie.prop_completions_mono
        ]

    indexTests =
      testGroup "index"
        [ testProperty "lookup" Index.prop_lookup
        , testProperty "valid" Index.prop_valid
        ]
| hvr/tar | test/Properties.hs | bsd-3-clause | 871 | 0 | 11 | 209 | 175 | 100 | 75 | 21 | 1 |
{-# OPTIONS_HADDOCK hide #-}
-- #hide
#if __GLASGOW_HASKELL__ >= 701
{-# LANGUAGE Safe #-}
#endif
module Text.XHtml.Frameset.Attributes where
import Text.XHtml.Internals
-- * Extra attributes in XHTML Frameset

-- | @frameborder@ attribute (frame border on\/off, given as an 'Int').
frameborder :: Int -> HtmlAttr
frameborder = intAttr "frameborder"

-- | @marginheight@ attribute (vertical frame margin, in pixels).
marginheight :: Int -> HtmlAttr
marginheight = intAttr "marginheight"

-- | @marginwidth@ attribute (horizontal frame margin, in pixels).
marginwidth :: Int -> HtmlAttr
marginwidth = intAttr "marginwidth"

-- | @noresize@ boolean attribute (takes no value).
noresize :: HtmlAttr
noresize = emptyAttr "noresize"

-- | @scrolling@ attribute; the value is passed through unvalidated.
scrolling :: String -> HtmlAttr
scrolling = strAttr "scrolling"
| DavidAlphaFox/ghc | libraries/xhtml/Text/XHtml/Frameset/Attributes.hs | bsd-3-clause | 654 | 0 | 5 | 193 | 101 | 59 | 42 | 13 | 1 |
module Stack.DefaultColorWhen
( defaultColorWhen
) where
import Stack.Prelude (stdout)
import Stack.Types.Config (ColorWhen (ColorAuto, ColorNever))
import System.Console.ANSI (hSupportsANSIWithoutEmulation)
import System.Environment (lookupEnv)
-- | The default adopts the standard proposed at http://no-color.org/, that
-- color should not be added by default if the @NO_COLOR@ environment variable
-- is present.
defaultColorWhen :: IO ColorWhen
defaultColorWhen = do
  -- On Windows, 'hSupportsANSIWithoutEmulation' has the side effect of
  -- enabling ANSI for ANSI-capable native (ConHost) terminals, if not already
  -- ANSI-enabled.  It is therefore run unconditionally — even though
  -- @NO_COLOR@ may be present — because @NO_COLOR@ might be overridden in a
  -- yaml configuration file or at the command line.
  supportsANSI <- hSupportsANSIWithoutEmulation stdout
  noColor <- lookupEnv "NO_COLOR"
  return $ case (noColor, supportsANSI) of
    (Just _, _)     -> ColorNever   -- NO_COLOR is set: honour it.
    (_, Just False) -> ColorNever   -- terminal definitely lacks ANSI support
    _               -> ColorAuto
| juhp/stack | src/Stack/DefaultColorWhen.hs | bsd-3-clause | 1,049 | 0 | 14 | 188 | 145 | 82 | 63 | 15 | 3 |
{-# LANGUAGE OverloadedStrings #-}
module Clay.Pseudo where
import Data.Text (Text)
import Clay.Selector
-- List of specific pseudo classes, from:
-- https://developer.mozilla.org/en-US/docs/CSS/Pseudo-classes
-- Pseudo-elements (single-colon CSS2 spelling).
after, before :: Refinement
after = ":after"
before = ":before"

-- Link/interaction and structural pseudo-classes.
link, visited, active, hover, focus, firstChild, lastChild :: Refinement
link = ":link"
visited = ":visited"
active = ":active"
hover = ":hover"
focus = ":focus"
firstChild = ":first-child"
lastChild = ":last-child"

-- Form-state and tree-structural pseudo-classes.
-- 'default_' carries a trailing underscore because @default@ is a
-- Haskell keyword.
checked, default_, disabled, empty, enabled, firstOfType, indeterminate,
  inRange, invalid, lastOfType, onlyChild, onlyOfType, optional,
  outOfRange, required, root, target, valid :: Refinement
checked = ":checked"
default_ = ":default"
disabled = ":disabled"
empty = ":empty"
enabled = ":enabled"
firstOfType = ":first-of-type"
indeterminate = ":indeterminate"
inRange = ":in-range"
invalid = ":invalid"
lastOfType = ":last-of-type"
onlyChild = ":only-child"
onlyOfType = ":only-of-type"
optional = ":optional"
outOfRange = ":out-of-range"
required = ":required"
root = ":root"
target = ":target"
valid = ":valid"

-- Parameterised pseudo-classes; the argument text is spliced verbatim
-- into the functional notation, e.g. @nthChild "2n+1"@.
lang, nthChild, nthLastChild, nthLastOfType, nthOfType :: Text -> Refinement
lang n = func "lang" [n]
nthChild n = func "nth-child" [n]
nthLastChild n = func "nth-last-child" [n]
nthLastOfType n = func "nth-last-of-type" [n]
nthOfType n = func "nth-of-type" [n]
| damianfral/clay | src/Clay/Pseudo.hs | bsd-3-clause | 1,541 | 0 | 6 | 350 | 319 | 207 | 112 | 42 | 1 |
module Main where
import Haste
import Haste.DOM
import Haste.Events
-- Look up the four UI elements by id and wire them to the calculator.
main = withElems ["a","b","op","result"] calculator

-- Attach event handlers so the result is recomputed on every keystroke
-- in either operand field and on every operator change.
-- NOTE(review): the list pattern assumes withElems delivers exactly the
-- four requested elements in order — confirm against the Haste API.
calculator [a,b,op,result] = do
    onEvent a KeyUp $ \_ -> recalculate
    onEvent b KeyUp $ \_ -> recalculate
    onEvent op Change $ \_ -> recalculate
  where
    -- Re-read both operands and the operator, then display the result.
    -- If either operand fails to parse, the result is left unchanged.
    recalculate = do
      ma <- getValue a
      mb <- getValue b
      Just op' <- getValue op
      case (ma, mb) of
        (Just a', Just b') -> setProp result "innerHTML" (toString $ calc op' a' b')
        _ -> return ()
    -- Map the operator string to the arithmetic operation;
    -- unknown operators yield the constant 0.
    calc "+" = (+)
    calc "-" = (-)
    calc "*" = (*)
    calc "/" = (/)
    calc _ = \_ _ -> 0 :: Double
| beni55/haste-compiler | examples/calculator/calculator.hs | bsd-3-clause | 643 | 0 | 15 | 205 | 271 | 142 | 129 | 21 | 6 |
module Meas () where
import Language.Haskell.Liquid.Prelude
-- | Hand-rolled list length: one per cons cell.  Kept as explicit
-- equations (rather than a fold) so the recursive structure stays
-- visible to LiquidHaskell's measure machinery.
mylen :: [a] -> Int
mylen []         = 0
mylen (_ : rest) = 1 + mylen rest
-- | Hand-rolled 'map': apply the function to every element,
-- preserving order and length.
mymap :: (a -> b) -> [a] -> [b]
mymap _ []       = []
mymap g (y : ys) = g y : mymap g ys
-- A concrete list to exercise the measures above.
zs :: [Int]
zs = [1..100]

-- Mapping preserves length; 'liquidAssertB' lets LiquidHaskell check
-- the boolean statically (this file lives in the tests/pos suite).
prop2 = liquidAssertB (n1 == n2)
  where n1 = mylen zs
        n2 = mylen $ mymap (`plus` 1) zs
| mightymoose/liquidhaskell | tests/pos/meas4.hs | bsd-3-clause | 325 | 0 | 9 | 100 | 170 | 94 | 76 | 12 | 1 |
module Lamdu.Data.ExampleDB(initDB, createBuiltins) where
import Control.Applicative (Applicative(..), liftA2, (<$>))
import Control.Lens.Operators
import Control.Monad (join, unless, void, (<=<))
import Control.Monad.Trans.Class (lift)
import Control.MonadA (MonadA)
import Data.Binary (Binary(..))
import Data.Foldable (traverse_)
import Data.List.Split (splitOn)
import Data.Store.Db (Db)
import Data.Store.Guid (Guid)
import Data.Store.IRef (IRef, Tag)
import Data.Store.Rev.Branch (Branch)
import Data.Store.Rev.Version (Version)
import Data.Store.Transaction (Transaction, setP)
import Data.Traversable (traverse)
import Lamdu.Data.Anchors (PresentationMode(..))
import qualified Control.Lens as Lens
import qualified Control.Monad.Trans.Writer as Writer
import qualified Data.Store.Guid as Guid
import qualified Data.Store.IRef as IRef
import qualified Data.Store.Rev.Branch as Branch
import qualified Data.Store.Rev.Version as Version
import qualified Data.Store.Rev.View as View
import qualified Data.Store.Transaction as Transaction
import qualified Lamdu.Data.DbLayout as Db
import qualified Lamdu.Data.Definition as Definition
import qualified Lamdu.Data.Expression as Expr
import qualified Lamdu.Data.Expression.IRef as ExprIRef
import qualified Lamdu.Data.Expression.Lens as ExprLens
import qualified Lamdu.Data.Expression.Utils as ExprUtil
import qualified Lamdu.Data.FFI as FFI
import qualified Lamdu.Data.Ops as DataOps
import qualified Lamdu.GUI.WidgetIdIRef as WidgetIdIRef
-- | Allocate a fresh 'IRef' from a new transaction key without writing
-- any content to it; the caller is expected to fill it in later.
newTodoIRef :: MonadA m => Transaction m (IRef (Tag m) a)
newTodoIRef = fmap IRef.unsafeFromGuid Transaction.newKey
-- | Knot-tying allocation: create an 'IRef' first, pass it to the
-- builder (so the built value can refer to its own location), then
-- write the result into that same 'IRef'.
fixIRef ::
  (Binary a, MonadA m) =>
  (IRef (Tag m) a -> Transaction m a) ->
  Transaction m (IRef (Tag m) a)
fixIRef createOuter = do
  x <- newTodoIRef
  Transaction.writeIRef x =<< createOuter x
  return x
-- | Populate a fresh database with the standard builtin definitions.
-- Returns the FFI environment and special functions needed elsewhere,
-- plus (via the Writer) the list of all public definitions created.
createBuiltins ::
  MonadA m => (Guid -> Guid) -> Transaction m ((FFI.Env (Tag m), Db.SpecialFunctions (Tag m)), [ExprIRef.DefIM m])
createBuiltins augmentTagGuids =
  Writer.runWriterT $ do
    -- The list type and its constructors come first because later
    -- definitions' types mention them.
    list <- mkDefinitionRef $ publicBuiltin "Data.List.List" setToSet
    let listOf = mkApply list
    headTag <- lift $ namedTag "head" headTagGuid
    tailTag <- lift $ namedTag "tail" tailTagGuid
    cons <-
      publicBuiltin "Prelude.:" $ forAll "a" $ \a ->
      mkPi (mkRecordType pure [(headTag, a), (tailTag, listOf a)]) $ listOf a
    nil <- publicBuiltin "Prelude.[]" $ forAll "a" listOf
    publicBuiltin_ "Data.List.tail" $ forAll "a" (endo . listOf)
    publicBuiltin_ "Data.List.head" . forAll "a" $ join (mkPi . listOf)
    _ <-
      publicBuiltin "Data.Map.Map" $ mkPiRecord
      [ ("Key", set)
      , ("Value", set)
      ] set
    mybe <- mkDefinitionRef $ publicBuiltin "Data.Maybe.Maybe" setToSet
    let maybeOf = mkApply mybe
    publicBuiltin_ "Prelude.Just" $ forAll "a" $ \a -> mkPi a $ maybeOf a
    publicBuiltin_ "Prelude.Nothing" $ forAll "a" $ \a -> maybeOf a
    publicBuiltin_ "Data.List.caseMaybe" . forAll "a" $ \a -> forAll "b" $ \b ->
      mkPiRecord
      [ ( "maybe", maybeOf a )
      , ( "nothing", b )
      , ( "just", mkPi a b )
      ] b
    -- Booleans; 'true' and 'false' are kept for the FFI environment.
    bool <- mkDefinitionRef $ publicBuiltin "Prelude.Bool" set
    true <- publicBuiltin "Prelude.True" bool
    false <- publicBuiltin "Prelude.False" bool
    publicBuiltin_ "Prelude.not" $ mkPi bool bool
    traverse_ ((`publicBuiltin_` mkInfixType bool bool bool) . ("Prelude."++))
      ["&&", "||"]
    publicBuiltin_ "Prelude.if" . forAll "a" $ \a ->
      mkPiRecord
      [ ("condition", bool)
      , ("then", a)
      , ("else", a)
      ] a
    publicBuiltin_ "Prelude.id" $ forAll "a" endo
    publicBuiltin_ "Prelude.const" .
      forAll "a" $ \a ->
      forAll "b" $ \b ->
      mkPi a $ mkPi b a
    publicBuiltin_ "Data.Function.fix" . forAll "a" $ \a -> mkPi (mkPi a a) a
    publicBuiltin_ "Data.List.reverse" $ forAll "a" (endo . listOf)
    publicBuiltin_ "Data.List.last" . forAll "a" $ join (mkPi . listOf)
    publicBuiltin_ "Data.List.null" . forAll "a" $ \a -> mkPi (listOf a) bool
    publicBuiltin_ "Data.List.length" . forAll "a" $ \a ->
      mkPi (listOf a) integer
    publicBuiltin_ "Prelude.product" . forAll "a" $ \a ->
      mkPi (listOf a) a
    publicBuiltin_ "Prelude.sum" . forAll "a" $ \a ->
      mkPi (listOf a) a
    publicBuiltin_ "Prelude.maximum" . forAll "a" $ \a ->
      mkPi (listOf a) a
    publicBuiltin_ "Prelude.minimum" . forAll "a" $ \a ->
      mkPi (listOf a) a
    let
      -- Shared shape for filter-like functions; only the name of the
      -- predicate field differs.
      filterType predName =
        forAll "a" $ \a ->
        mkPiRecord
        [ ("from", listOf a)
        , (predName, mkPi a bool)
        ] $ listOf a
    publicDef_ "filter" Verbose ["Data", "List"] "filter" $ filterType "predicate"
    publicDef_ "take" Verbose ["Data", "List"] "takeWhile" $ filterType "while"
    publicDef_ "take" Verbose ["Data", "List"] "take" . forAll "a" $ \a ->
      mkPiRecord
      [ ("from", listOf a)
      , ("count", integer)
      ] $ listOf a
    publicBuiltin_ "Data.List.map" .
      forAll "a" $ \a ->
      forAll "b" $ \b ->
      mkPiRecord
      [ ("list", listOf a)
      , ("mapping", mkPi a b)
      ] $ listOf b
    publicBuiltin_ "Data.List.concat" . forAll "a" $ \a ->
      mkPi (listOf (listOf a)) (listOf a)
    publicBuiltin_ "Data.List.replicate" . forAll "a" $ \a ->
      mkPiRecord
      [ ("item", a)
      , ("count", integer)
      ] $ listOf a
    publicBuiltin_ "Data.List.foldl" . forAll "a" $ \a -> forAll "b" $ \b ->
      mkPiRecord
      [ ( "list", listOf b )
      , ( "initial", a )
      , ( "next"
        , mkPiRecord
          [ ("accumulator", a)
          , ("item", b)
          ] a
        )
      ] a
    publicBuiltin_ "Data.List.foldr" . forAll "a" $ \a -> forAll "b" $ \b ->
      mkPiRecord
      [ ( "list", listOf a )
      , ( "initial", b )
      , ( "next"
        , mkPiRecord
          [ ("item", a)
          , ("rest", b)
          ] b
        )
      ] b
    publicBuiltin_ "Data.List.caseList" . forAll "a" $ \a -> forAll "b" $ \b ->
      mkPiRecord
      [ ( "list", listOf a )
      , ( "empty", b )
      , ( "cons"
        , mkPiRecord
          [ ("item", a)
          , ("rest", listOf a)
          ] b
        )
      ] b
    publicBuiltin_ "Data.List.zipWith" . forAll "a" $ \a -> forAll "b" $ \b -> forAll "c" $ \c ->
      mkPiRecord
      [ ( "func", mkPiRecord [("x", a), ("y", b)] c)
      , ( "xs", listOf a )
      , ( "ys", listOf b )
      ] $ listOf c
    -- Arithmetic and comparison operators share a type skeleton.
    let aToAToA = forAll "a" $ \a -> mkInfixType a a a
    traverse_ ((`publicBuiltin_` aToAToA) . ("Prelude." ++))
      ["+", "-", "*", "/", "^", "++", "div"]
    publicDef_ "%" Infix ["Prelude"] "mod" aToAToA
    publicBuiltin_ "Prelude.negate" $ forAll "a" endo
    publicBuiltin_ "Prelude.sqrt" $ forAll "a" endo
    publicBuiltin_ "Prelude.floor" $ forAll "a" $ \a -> forAll "b" $ \b -> mkPi a b
    let aToAToBool = forAll "a" $ \a -> mkInfixType a a bool
    traverse_ ((`publicBuiltin_` aToAToBool) . ("Prelude." ++))
      ["==", "/=", "<=", ">=", "<", ">"]
    publicDef_ ".." Infix ["Prelude"] "enumFromTo" . mkInfixType integer integer $ listOf integer
    publicBuiltin_ "Prelude.enumFrom" . mkPi integer $ listOf integer
    publicDef_ "iterate" Verbose ["Data", "List"] "iterate" .
      forAll "a" $ \a ->
      mkPiRecord [("initial", a), ("step", endo a)] $ listOf a
    let
      specialFunctions = Db.SpecialFunctions
        { Db.sfNil = nil
        , Db.sfCons = cons
        , Db.sfHeadTag = headTagGuid
        , Db.sfTailTag = tailTagGuid
        }
      ffiEnv = FFI.Env
        { FFI.trueDef = true
        , FFI.falseDef = false
        }
    return (ffiEnv, specialFunctions)
  where
    -- Like publicDef, discarding the created definition.
    publicDef_ name presentationMode ffiPath ffiName mkType =
      void $ publicDef name presentationMode ffiPath ffiName mkType
    -- Create a named builtin definition with the given type and record
    -- it (via publicize) as a public global.
    publicDef name presentationMode ffiPath ffiName mkType = publicize $ do
      typeI <- mkType
      DataOps.newDefinition name presentationMode .
        (`Definition.Body` typeI) . Definition.ContentBuiltin .
        Definition.Builtin $ Definition.FFIName ffiPath ffiName
    -- Split "Module.Path.name" into FFI path and name.
    publicBuiltin fullyQualifiedName =
      publicDef name (DataOps.presentationModeOfName name) path name
      where
        path = init fqPath
        name = last fqPath
        fqPath = splitOn "." fullyQualifiedName
    publicBuiltin_ builtinName typeMaker =
      void $ publicBuiltin builtinName typeMaker
    endo = join mkPi
    set = ExprIRef.newExprBody $ ExprLens.bodyType # ()
    integer = ExprIRef.newExprBody $ Expr.BodyLeaf Expr.IntegerType
    -- Universal quantification: a Pi whose parameter is a fresh, named
    -- type variable; the body may refer back to it.
    forAll name f = fmap ExprIRef.ExpressionI . fixIRef $ \aI -> do
      let aGuid = IRef.guid aI
      setP (Db.assocNameRef aGuid) name
      s <- set
      return . ExprUtil.makePi aGuid s =<<
        f ((ExprIRef.newExprBody . Lens.review ExprLens.bodyParameterRef) aGuid)
    setToSet = mkPi set set
    mkPi mkArgType mkResType = fmap snd . join $ liftA2 ExprIRef.newPi mkArgType mkResType
    mkApply mkFunc mkArg =
      ExprIRef.newExprBody =<< liftA2 ExprUtil.makeApply mkFunc mkArg
    -- Fresh tags get their guid through augmentTagGuids so callers can
    -- namespace them.
    newTag name = namedTag name . augmentTagGuids $ Guid.fromString name
    namedTag name tagGuid = do
      setP (Db.assocNameRef tagGuid) name
      ExprIRef.newExprBody $ ExprLens.bodyTag # tagGuid
    mkRecordType mkTag fields = do
      tagFields <- traverse (Lens._1 mkTag <=< Lens.sequenceOf Lens._2) fields
      ExprIRef.newExprBody $ Expr.BodyRecord Expr.Record
        { Expr._recordKind = Expr.KType
        , Expr._recordFields = tagFields
        }
    -- Record a definition in the Writer log of public globals.
    publicize f = do
      x <- lift f
      Writer.tell [x]
      return x
    mkPiRecord = mkPi . mkRecordType newTag
    mkInfixRecordType lType rType = do
      l <- namedTag "l" $ Guid.fromString "infixlarg"
      r <- namedTag "r" $ Guid.fromString "infixrarg"
      mkRecordType pure [(l, lType), (r, rType)]
    mkInfixType lType rType =
      mkPi $ mkInfixRecordType lType rType
    mkDefinitionRef f =
      ExprIRef.newExprBody . (ExprLens.bodyDefinitionRef # ) <$> f
    headTagGuid = Guid.fromString "headTag"
    tailTagGuid = Guid.fromString "tailTag"
-- | Create a new branch at the given version and record its
-- user-visible name in the name-association store.
newBranch :: MonadA m => String -> Version (Tag m) -> Transaction m (Branch (Tag m))
newBranch name version =
  Branch.new version >>= \branch -> do
    setP (Db.assocNameRef (Branch.guid branch)) name
    return branch
-- | Initialise the database on first run: create the initial empty
-- version, the "master" branch and its view, write all revision/code
-- anchors and the builtin definitions.  Idempotent: does nothing if the
-- branches IRef already exists.
initDB :: (Guid -> Guid) -> Db -> IO ()
initDB augmentTagGuids db =
  Db.runDbTransaction db $ do
    exists <- Transaction.irefExists $ Db.branches Db.revisionIRefs
    unless exists $ do
      emptyVersion <- Version.makeInitialVersion []
      master <- newBranch "master" emptyVersion
      view <- View.new master
      let writeRevAnchor f = Transaction.writeIRef (f Db.revisionIRefs)
      writeRevAnchor Db.view view
      writeRevAnchor Db.branches [master]
      writeRevAnchor Db.currentBranch master
      writeRevAnchor Db.redos []
      let paneWId = WidgetIdIRef.fromIRef $ Db.panes Db.codeIRefs
      writeRevAnchor Db.cursor paneWId
      -- Code-level anchors live in the view's transaction, not the
      -- top-level db transaction.
      Db.runViewTransaction view $ do
        ((ffiEnv, specialFunctions), builtins) <- createBuiltins augmentTagGuids
        let writeCodeAnchor f = Transaction.writeIRef (f Db.codeIRefs)
        writeCodeAnchor Db.clipboards []
        writeCodeAnchor Db.specialFunctions specialFunctions
        writeCodeAnchor Db.ffiEnv ffiEnv
        writeCodeAnchor Db.globals builtins
        writeCodeAnchor Db.panes []
        writeCodeAnchor Db.preJumps []
        writeCodeAnchor Db.preCursor paneWId
        writeCodeAnchor Db.postCursor paneWId
        writeCodeAnchor Db.tags []
      -- Prevent undo into the invalid empty revision
      newVer <- Branch.curVersion master
      Version.preventUndo newVer
| schell/lamdu | Lamdu/Data/ExampleDB.hs | gpl-3.0 | 11,600 | 0 | 20 | 2,843 | 3,860 | 1,968 | 1,892 | 278 | 1 |
module T11272a where
import Control.Monad.Trans.State
import Control.Monad
-- | Repeatedly swap the two arguments until the first is strictly
-- greater than the second.  The unit state is demanded via the @()@
-- pattern before each recursive step.
overloaded :: Ord a => a -> a -> State () ()
overloaded x y = get >>= \() -> when (x <= y) (overloaded y x)
{-# INLINABLE overloaded #-}
| ezyang/ghc | testsuite/tests/simplCore/should_compile/T11272a.hs | bsd-3-clause | 216 | 0 | 9 | 44 | 88 | 46 | 42 | 7 | 1 |
{-# LANGUAGE MultiParamTypeClasses, FunctionalDependencies, FlexibleContexts, GADTs #-}
module FDsFromGivens where
-- | A class with a functional dependency: the first parameter uniquely
-- determines the second.
class C a b | a -> b where
  cop :: a -> b -> ()
{- Failing, as it righteously should! It's inaccessible code -}
-- But (c.f. test T5236) we no longer reject this in the
-- typechecker (see Trac #12466)
-- Instead we get a redundant pattern-match warning,
-- in the post-typechecking pattern-match checks
-- NOTE(review): compiler-test fixture — the context (C Char [a], C Char Bool)
-- is unsatisfiable under the fundep @a -> b@, so this body is
-- inaccessible by design; do not "fix" it.
g1 :: (C Char [a], C Char Bool) => a -> ()
g1 x = ()
| olsner/ghc | testsuite/tests/typecheck/should_fail/FDsFromGivens.hs | bsd-3-clause | 478 | 0 | 9 | 90 | 85 | 49 | 36 | 6 | 1 |
module T10598_fail5 where
-- Fixture: two consecutive 'deriving' clauses on one data declaration
-- require the DerivingStrategies extension, which this module
-- deliberately omits (the test expects a compile failure).
data Foo = Foo
  deriving Eq
  deriving Ord
| ezyang/ghc | testsuite/tests/deriving/should_fail/T10598_fail5.hs | bsd-3-clause | 71 | 0 | 5 | 16 | 17 | 11 | 6 | 4 | 0 |
module Tree where
-- | A polymorphic binary search tree: either empty, or a node carrying
-- a value and two subtrees.
data Tree a = EmptyTree | Node a (Tree a) (Tree a) deriving (Show, Read, Eq)
-- | A tree containing exactly one value, with two empty subtrees.
singleton :: a -> Tree a
singleton v = Node v EmptyTree EmptyTree
-- | Insert a value, preserving the BST ordering invariant.
-- Re-inserting an element that is already present returns the tree
-- unchanged (no duplicates are stored).
treeInsert :: (Ord a) => a -> Tree a -> Tree a
treeInsert x EmptyTree = singleton x
treeInsert x node@(Node v l r) =
  case compare x v of
    EQ -> node
    LT -> Node v (treeInsert x l) r
    GT -> Node v l (treeInsert x r)
-- | Membership test: descend left or right according to the ordering,
-- so only one path from the root is inspected.
treeElem :: (Ord a) => a -> Tree a -> Bool
treeElem _ EmptyTree = False
treeElem x (Node v l r) =
  case compare x v of
    EQ -> True
    LT -> treeElem x l
    GT -> treeElem x r
-- | Map a function over every stored value, preserving the tree shape.
-- The EmptyTree equation uses a wildcard for the unused function
-- argument (the original bound @f@ without using it, which trips
-- -Wunused-matches).
instance Functor Tree where
  fmap _ EmptyTree = EmptyTree
  fmap f (Node v l r) = Node (f v) (fmap f l) (fmap f r)
| fredmorcos/attic | snippets/haskell/Tree.hs | isc | 708 | 0 | 8 | 178 | 373 | 183 | 190 | 19 | 1 |
{-# LANGUAGE ViewPatterns #-}
-----------------------------------------------------------------------------
-- |
-- Module : Algebra.Graph.Test.Acyclic.AdjacencyMap
-- Copyright : (c) Andrey Mokhov 2016-2022
-- License : MIT (see the file LICENSE)
-- Maintainer : [email protected]
-- Stability : experimental
--
-- Testsuite for "Algebra.Graph.Acyclic.AdjacencyMap".
-----------------------------------------------------------------------------
module Algebra.Graph.Test.Acyclic.AdjacencyMap (testAcyclicAdjacencyMap) where
import Algebra.Graph.Acyclic.AdjacencyMap
import Algebra.Graph.Test hiding (shrink)
import Data.Bifunctor
import Data.Tuple
import qualified Algebra.Graph.AdjacencyMap as AM
import qualified Algebra.Graph.AdjacencyMap.Algorithm as AM
import qualified Algebra.Graph.NonEmpty.AdjacencyMap as NonEmpty
import qualified Data.Set as Set
import qualified GHC.Exts as Exts
-- Shorthands for the graph types under test: acyclic and plain
-- adjacency maps over Int vertices.
type AAI = AdjacencyMap Int
type AI = AM.AdjacencyMap Int
-- TODO: Switch to using generic tests.
-- | One test case per documented property of
-- "Algebra.Graph.Acyclic.AdjacencyMap", grouped by API function; each
-- group is announced with a banner on stdout.  Property arguments are
-- pinned to Int-vertex graphs via the 'AAI'/'AI' aliases.
testAcyclicAdjacencyMap :: IO ()
testAcyclicAdjacencyMap = do
  putStrLn "\n============ Acyclic.AdjacencyMap.Show ============"
  test "show empty == \"empty\"" $
    show (empty :: AAI) == "empty"
  test "show (shrink 1) == \"vertex 1\"" $
    show (shrink 1 :: AAI) == "vertex 1"
  test "show (shrink $ 1 + 2) == \"vertices [1,2]\"" $
    show (shrink $ 1 + 2 :: AAI) == "vertices [1,2]"
  test "show (shrink $ 1 * 2) == \"(fromJust . toAcyclic) (edge 1 2)\"" $
    show (shrink $ 1 * 2 :: AAI) == "(fromJust . toAcyclic) (edge 1 2)"
  test "show (shrink $ 1 * 2 * 3) == \"(fromJust . toAcyclic) (edges [(1,2),(1,3),(2,3)])\"" $
    show (shrink $ 1 * 2 * 3 :: AAI) == "(fromJust . toAcyclic) (edges [(1,2),(1,3),(2,3)])"
  test "show (shrink $ 1 * 2 + 3) == \"(fromJust . toAcyclic) (overlay (vertex 3) (edge 1 2))\"" $
    show (shrink $ 1 * 2 + 3 :: AAI) == "(fromJust . toAcyclic) (overlay (vertex 3) (edge 1 2))"

  putStrLn "\n============ Acyclic.AdjacencyMap.fromAcyclic ============"
  test "fromAcyclic empty == empty" $
    fromAcyclic (empty :: AAI) == AM.empty
  test "fromAcyclic . vertex == vertex" $ \(x :: Int) ->
    (fromAcyclic . vertex) x == AM.vertex x
  test "fromAcyclic (shrink $ 1 * 3 * 2) == star 1 [2,3]" $
    fromAcyclic (shrink $ 1 * 3 + 2) == 1 * 3 + (2 :: AI)
  test "vertexCount . fromAcyclic == vertexCount" $ \(x :: AAI) ->
    (AM.vertexCount . fromAcyclic) x == vertexCount x
  test "edgeCount . fromAcyclic == edgeCount" $ \(x :: AAI) ->
    (AM.edgeCount . fromAcyclic) x == edgeCount x
  test "isAcyclic . fromAcyclic == const True" $ \(x :: AAI) ->
    (AM.isAcyclic . fromAcyclic) x == const True x

  putStrLn "\n============ Acyclic.AdjacencyMap.empty ============"
  test "isEmpty empty == True" $
    isEmpty (empty :: AAI) == True
  test "hasVertex x empty == False" $ \x ->
    hasVertex x (empty :: AAI) == False
  test "vertexCount empty == 0" $
    vertexCount (empty :: AAI) == 0
  test "edgeCount empty == 0" $
    edgeCount (empty :: AAI) == 0

  putStrLn "\n============ Acyclic.AdjacencyMap.vertex ============"
  test "isEmpty (vertex x) == False" $ \(x :: Int) ->
    isEmpty (vertex x) == False
  test "hasVertex x (vertex y) == (x == y)" $ \(x :: Int) y ->
    hasVertex x (vertex y) == (x == y)
  test "vertexCount (vertex x) == 1" $ \(x :: Int) ->
    vertexCount (vertex x) == 1
  test "edgeCount (vertex x) == 0" $ \(x :: Int) ->
    edgeCount (vertex x) == 0

  putStrLn "\n============ Acyclic.AdjacencyMap.vertices ============"
  test "vertices [] == empty" $
    vertices [] == (empty :: AAI)
  test "vertices [x] == vertex x" $ \(x :: Int) ->
    vertices [x] == vertex x
  test "hasVertex x . vertices == elem x" $ \(x :: Int) xs ->
    (hasVertex x . vertices) xs == elem x xs
  test "vertexCount . vertices == length . nub" $ \(xs :: [Int]) ->
    (vertexCount . vertices) xs == (length . nubOrd) xs
  test "vertexSet . vertices == Set.fromList" $ \(xs :: [Int]) ->
    (vertexSet . vertices) xs == Set.fromList xs

  putStrLn "\n============ Acyclic.AdjacencyMap.union ============"
  test "vertexSet (union x y) == <correct result>" $ \(x :: AAI) (y :: AAI) ->
    vertexSet (union x y) == Set.unions [ Set.map Left (vertexSet x)
                                        , Set.map Right (vertexSet y) ]
  test "edgeSet (union x y) == <correct result>" $ \(x :: AAI) (y :: AAI) ->
    edgeSet (union x y) == Set.unions [ Set.map (bimap Left Left ) (edgeSet x)
                                      , Set.map (bimap Right Right) (edgeSet y) ]

  putStrLn "\n============ Acyclic.AdjacencyMap.join ============"
  test "vertexSet (join x y) == <correct result>" $ \(x :: AAI) (y :: AAI) ->
    vertexSet (join x y) == Set.unions [ Set.map Left (vertexSet x)
                                       , Set.map Right (vertexSet y) ]
  test "edgeSet (join x y) == <correct result>" $ \(x :: AAI) (y :: AAI) ->
    edgeSet (join x y) == Set.unions
      [ Set.map (bimap Left Left ) (edgeSet x)
      , Set.map (bimap Right Right) (edgeSet y)
      , Set.map (bimap Left Right) (Set.cartesianProduct (vertexSet x) (vertexSet y)) ]

  putStrLn "\n============ Acyclic.AdjacencyMap.isSubgraphOf ============"
  test "isSubgraphOf empty x == True" $ \(x :: AAI) ->
    isSubgraphOf empty x == True
  test "isSubgraphOf (vertex x) empty == False" $ \(x :: Int) ->
    isSubgraphOf (vertex x) empty == False
  test "isSubgraphOf (induce p x) x == True" $ \(x :: AAI) (apply -> p) ->
    isSubgraphOf (induce p x) x == True
  test "isSubgraphOf x (transitiveClosure x) == True" $ \(x :: AAI) ->
    isSubgraphOf x (transitiveClosure x) == True
  test "isSubgraphOf x y ==> x <= y" $ \(x :: AAI) z ->
    let connect x y = shrink $ fromAcyclic x + fromAcyclic y
        -- TODO: Make the precondition stronger
        y = connect x (vertices z) -- Make sure we hit the precondition
    in isSubgraphOf x y ==> x <= y

  putStrLn "\n============ Acyclic.AdjacencyMap.isEmpty ============"
  test "isEmpty empty == True" $
    isEmpty (empty :: AAI) == True
  test "isEmpty (vertex x) == False" $ \(x :: Int) ->
    isEmpty (vertex x) == False
  test "isEmpty (removeVertex x $ vertex x) == True" $ \(x :: Int) ->
    isEmpty (removeVertex x $ vertex x) == True
  test "isEmpty (removeEdge 1 2 $ shrink $ 1 * 2) == False" $
    isEmpty (removeEdge 1 2 $ shrink $ 1 * 2 :: AAI) == False

  putStrLn "\n============ Acyclic.AdjacencyMap.hasVertex ============"
  test "hasVertex x empty == False" $ \(x :: Int) ->
    hasVertex x empty == False
  test "hasVertex x (vertex y) == (x == y)" $ \(x :: Int) y ->
    hasVertex x (vertex y) == (x == y)
  test "hasVertex x . removeVertex x == const False" $ \(x :: Int) y ->
    (hasVertex x . removeVertex x) y == const False y

  putStrLn "\n============ Acyclic.AdjacencyMap.hasEdge ============"
  test "hasEdge x y empty == False" $ \(x :: Int) y ->
    hasEdge x y empty == False
  test "hasEdge x y (vertex z) == False" $ \(x :: Int) y z ->
    hasEdge x y (vertex z) == False
  test "hasEdge 1 2 (shrink $ 1 * 2) == True" $
    hasEdge 1 2 (shrink $ 1 * 2 :: AAI) == True
  test "hasEdge x y . removeEdge x y == const False" $ \(x :: Int) y z ->
    (hasEdge x y . removeEdge x y) z == const False z
  test "hasEdge x y == elem (x,y) . edgeList" $ \(x :: Int) y z -> do
    (u, v) <- elements ((x, y) : edgeList z)
    return $ hasEdge u v z == elem (u, v) (edgeList z)

  putStrLn "\n============ Acyclic.AdjacencyMap.vertexCount ============"
  test "vertexCount empty == 0" $
    vertexCount (empty :: AAI) == 0
  test "vertexCount (vertex x) == 1" $ \(x :: Int) ->
    vertexCount (vertex x) == 1
  test "vertexCount == length . vertexList" $ \(x :: AAI) ->
    vertexCount x == (length . vertexList) x
  test "vertexCount x < vertexCount y ==> x < y" $ \(x :: AAI) y ->
    if vertexCount x < vertexCount y
    then property (x < y)
    else (vertexCount x > vertexCount y ==> x > y)

  putStrLn "\n============ Acyclic.AdjacencyMap.edgeCount ============"
  test "edgeCount empty == 0" $
    edgeCount (empty :: AAI) == 0
  test "edgeCount (vertex x) == 0" $ \(x :: Int) ->
    edgeCount (vertex x) == 0
  test "edgeCount (shrink $ 1 * 2) == 1" $
    edgeCount (shrink $ 1 * 2 :: AAI) == 1
  test "edgeCount == length . edgeList" $ \(x :: AAI) ->
    edgeCount x == (length . edgeList) x

  putStrLn "\n============ Acyclic.AdjacencyMap.vertexList ============"
  test "vertexList empty == []" $
    vertexList (empty :: AAI) == []
  test "vertexList (vertex x) == [x]" $ \(x :: Int) ->
    vertexList (vertex x) == [x]
  test "vertexList . vertices == nub . sort" $ \(xs :: [Int]) ->
    (vertexList . vertices) xs == (nubOrd . sort) xs

  putStrLn "\n============ Acyclic.AdjacencyMap.edgeList ============"
  test "edgeList empty == []" $
    edgeList (empty :: AAI) == []
  test "edgeList (vertex x) == []" $ \(x :: Int) ->
    edgeList (vertex x) == []
  test "edgeList (shrink $ 2 * 1) == [(2,1)]" $
    edgeList (shrink $ 2 * 1 :: AAI) == [(2,1)]
  test "edgeList . transpose == sort . map swap . edgeList" $ \(x :: AAI) ->
    (edgeList . transpose) x == (sort . map swap . edgeList) x

  putStrLn "\n============ Acyclic.AdjacencyMap.adjacencyList ============"
  test "adjacencyList empty == []" $
    adjacencyList (empty :: AAI) == []
  test "adjacencyList (vertex x) == [(x, [])]" $ \(x :: Int) ->
    adjacencyList (vertex x) == [(x, [])]
  test "adjacencyList (shrink $ 1 * 2) == [(1, [2]), (2, [])]" $
    adjacencyList (shrink $ 1 * 2 :: AAI) == [(1, [2]), (2, [])]

  putStrLn "\n============ Acyclic.AdjacencyMap.vertexSet ============"
  test "vertexSet empty == Set.empty" $
    vertexSet (empty :: AAI) == Set.empty
  test "vertexSet . vertex == Set.singleton" $ \(x :: Int) ->
    (vertexSet . vertex) x == Set.singleton x
  test "vertexSet . vertices == Set.fromList" $ \(xs :: [Int]) ->
    (vertexSet . vertices) xs == Set.fromList xs

  putStrLn "\n============ Acyclic.AdjacencyMap.edgeSet ============"
  test "edgeSet empty == Set.empty" $
    edgeSet (empty :: AAI) == Set.empty
  test "edgeSet (vertex x) == Set.empty" $ \(x :: Int) ->
    edgeSet (vertex x) == Set.empty
  test "edgeSet (shrink $ 1 * 2) == Set.singleton (1,2)" $
    edgeSet (shrink $ 1 * 2 :: AAI) == Set.singleton (1,2)

  putStrLn "\n============ Acyclic.AdjacencyMap.preSet ============"
  test "preSet x empty == Set.empty" $ \(x :: Int) ->
    preSet x empty == Set.empty
  test "preSet x (vertex x) == Set.empty" $ \(x :: Int) ->
    preSet x (vertex x) == Set.empty
  test "preSet 1 (shrink $ 1 * 2) == Set.empty" $
    preSet 1 (shrink $ 1 * 2 :: AAI) == Set.empty
  test "preSet 2 (shrink $ 1 * 2) == Set.fromList [1]" $
    preSet 2 (shrink $ 1 * 2 :: AAI) == Set.fromList [1]
  test "Set.member x . preSet x == const False" $ \(x :: Int) y ->
    (Set.member x . preSet x) y == const False y

  putStrLn "\n============ Acyclic.AdjacencyMap.postSet ============"
  test "postSet x empty == Set.empty" $ \(x :: Int) ->
    postSet x empty == Set.empty
  test "postSet x (vertex x) == Set.empty" $ \(x :: Int) ->
    postSet x (vertex x) == Set.empty
  test "postSet 1 (shrink $ 1 * 2) == Set.fromList [2]" $
    postSet 1 (shrink $ 1 * 2 :: AAI) == Set.fromList [2]
  test "postSet 2 (shrink $ 1 * 2) == Set.empty" $
    postSet 2 (shrink $ 1 * 2 :: AAI) == Set.empty
  test "Set.member x . postSet x == const False" $ \(x :: Int) y ->
    (Set.member x . postSet x) y == const False y

  putStrLn "\n============ Acyclic.AdjacencyMap.removeVertex ============"
  test "removeVertex x (vertex x) == empty" $ \(x :: Int) ->
    removeVertex x (vertex x) == empty
  test "removeVertex 1 (vertex 2) == vertex 2" $
    removeVertex 1 (vertex 2 :: AAI) == vertex 2
  test "removeVertex 1 (shrink $ 1 * 2) == vertex 2" $
    removeVertex 1 (shrink $ 1 * 2 :: AAI) == vertex 2
  test "removeVertex x . removeVertex x == removeVertex x" $ \(x :: Int) y ->
    (removeVertex x . removeVertex x) y == removeVertex x y

  putStrLn "\n============ Acyclic.AdjacencyMap.removeEdge ============"
  test "removeEdge 1 2 (shrink $ 1 * 2) == vertices [1,2]" $
    removeEdge 1 2 (shrink $ 1 * 2 :: AAI) == vertices [1,2]
  test "removeEdge x y . removeEdge x y == removeEdge x y" $ \(x :: Int) y z ->
    (removeEdge x y . removeEdge x y) z == removeEdge x y z
  test "removeEdge x y . removeVertex x == removeVertex x" $ \(x :: Int) y z ->
    (removeEdge x y . removeVertex x) z == removeVertex x z
  test "removeEdge 1 2 (shrink $ 1 * 2 * 3) == shrink ((1 + 2) * 3)" $
    removeEdge 1 2 (shrink $ 1 * 2 * 3 :: AAI) == shrink ((1 + 2) * 3)

  putStrLn "\n============ Acyclic.AdjacencyMap.transpose ============"
  test "transpose empty == empty" $
    transpose (empty :: AAI) == empty
  test "transpose (vertex x) == vertex x" $ \(x :: Int) ->
    transpose (vertex x) == vertex x
  test "transpose . transpose == id" $ size10 $ \(x :: AAI) ->
    (transpose . transpose) x == id x
  test "edgeList . transpose == sort . map swap . edgeList" $ \(x :: AAI) ->
    (edgeList . transpose) x == (sort . map swap . edgeList) x

  putStrLn "\n============ Acyclic.AdjacencyMap.induce ============"
  test "induce (const True ) x == x" $ \(x :: AAI) ->
    induce (const True ) x == x
  test "induce (const False) x == empty" $ \(x :: AAI) ->
    induce (const False) x == empty
  test "induce (/= x) == removeVertex x" $ \x (y :: AAI) ->
    induce (/= x) y == removeVertex x y
  test "induce p . induce q == induce (\\x -> p x && q x)" $ \(apply -> p) (apply -> q) (y :: AAI) ->
    (induce p . induce q) y == induce (\x -> p x && q x) y
  test "isSubgraphOf (induce p x) x == True" $ \(apply -> p) (x :: AAI) ->
    isSubgraphOf (induce p x) x == True

  putStrLn "\n============ Acyclic.AdjacencyMap.induceJust ============"
  test "induceJust (vertex Nothing) == empty" $
    induceJust (vertex Nothing) == (empty :: AAI)
  test "induceJust . vertex . Just == vertex" $ \(x :: Int) ->
    (induceJust . vertex . Just) x == vertex x

  putStrLn "\n============ Acyclic.AdjacencyMap.box ============"
  test "edgeList (box (shrink $ 1 * 2) (shrink $ 10 * 20)) == <correct result>\n" $
    edgeList (box (shrink $ 1 * 2) (shrink $ 10 * 20)) == [ ((1,10), (1,20))
                                                          , ((1,10), (2,10))
                                                          , ((1,20), (2,20))
                                                          , ((2,10), (2 :: Int,20 :: Int)) ]
  let gmap f = shrink . AM.gmap f . fromAcyclic
      unit = gmap $ \(a :: Int, () ) -> a
      comm = gmap $ \(a :: Int, b :: Int) -> (b, a)
  test "box x y ~~ box y x" $ size10 $ \x y ->
    comm (box x y) == box y x
  test "box x (vertex ()) ~~ x" $ size10 $ \x ->
    unit(box x (vertex ())) == (x `asTypeOf` empty)
  test "box x empty ~~ empty" $ size10 $ \x ->
    unit(box x empty) == empty
  let assoc = gmap $ \(a :: Int, (b :: Int, c :: Int)) -> ((a, b), c)
  test "box x (box y z) ~~ box (box x y) z" $ size10 $ \x y z ->
    assoc (box x (box y z)) == box (box x y) z
  test "transpose (box x y) == box (transpose x) (transpose y)" $ size10 $ \(x :: AAI) (y :: AAI) ->
    transpose (box x y) == box (transpose x) (transpose y)
  test "vertexCount (box x y) == vertexCount x * vertexCount y" $ size10 $ \(x :: AAI) (y :: AAI) ->
    vertexCount (box x y) == vertexCount x * vertexCount y
  test "edgeCount (box x y) <= vertexCount x * edgeCount y + edgeCount x * vertexCount y" $ size10 $ \(x :: AAI) (y :: AAI) ->
    edgeCount (box x y) <= vertexCount x * edgeCount y + edgeCount x * vertexCount y

  putStrLn "\n============ Acyclic.AdjacencyMap.transitiveClosure ============"
  test "transitiveClosure empty == empty" $
    transitiveClosure empty == (empty :: AAI)
  test "transitiveClosure (vertex x) == vertex x" $ \(x :: Int) ->
    transitiveClosure (vertex x) == vertex x
  test "transitiveClosure (shrink $ 1 * 2 + 2 * 3) == shrink (1 * 2 + 1 * 3 + 2 * 3)" $
    transitiveClosure (shrink $ 1 * 2 + 2 * 3 :: AAI) == shrink (1 * 2 + 1 * 3 + 2 * 3)
  test "transitiveClosure . transitiveClosure == transitiveClosure" $ \(x :: AAI) ->
    (transitiveClosure . transitiveClosure) x == transitiveClosure x

  putStrLn "\n============ Acyclic.AdjacencyMap.topSort ============"
  test "topSort empty == []" $
    topSort (empty :: AAI) == []
  test "topSort (vertex x) == [x]" $ \(x :: Int) ->
    topSort (vertex x) == [x]
  test "topSort (shrink $ 1 * (2 + 4) + 3 * 4) == [1, 2, 3, 4]" $
    topSort (shrink $ 1 * (2 + 4) + 3 * 4) == [1, 2, 3, 4 :: Int]
  test "topSort (join x y) == fmap Left (topSort x) ++ fmap Right (topSort y)" $ \(x :: AAI) (y :: AAI) ->
    topSort (join x y) == fmap Left (topSort x) ++ fmap Right (topSort y)
  test "Right . topSort == AM.topSort . fromAcyclic" $ \(x :: AAI) ->
    Right (topSort x) == AM.topSort (fromAcyclic x)

  putStrLn "\n============ Acyclic.AdjacencyMap.scc ============"
  test "          scc empty == empty" $
    scc (AM.empty :: AI) == empty
  test "          scc (vertex x) == vertex (NonEmpty.vertex x)" $ \(x :: Int) ->
    scc (AM.vertex x) == vertex (NonEmpty.vertex x)
  test "          scc (edge 1 1) == vertex (NonEmpty.edge 1 1)" $
    scc (AM.edge 1 1 :: AI) == vertex (NonEmpty.edge 1 1)
  test "edgeList $ scc (edge 1 2) == [ (NonEmpty.vertex 1, NonEmpty.vertex 2) ]" $
    edgeList (scc (AM.edge 1 2 :: AI)) == [ (NonEmpty.vertex 1, NonEmpty.vertex 2) ]
  test "edgeList $ scc (3 * 1 * 4 * 1 * 5) == <correct result>" $
    edgeList (scc (3 * 1 * 4 * 1 * 5)) == [ (NonEmpty.vertex 3, NonEmpty.vertex (5 :: Int))
                                          , (NonEmpty.vertex 3, NonEmpty.clique1 (Exts.fromList [1,4,1]))
                                          , (NonEmpty.clique1 (Exts.fromList [1,4,1]), NonEmpty.vertex 5) ]

  putStrLn "\n============ Acyclic.AdjacencyMap.toAcyclic ============"
  test "toAcyclic (path    [1,2,3]) == Just (shrink $ 1 * 2 + 2 * 3)" $
    toAcyclic (AM.path [1,2,3]) == Just (shrink $ 1 * 2 + 2 * 3 :: AAI)
  test "toAcyclic (clique  [3,2,1]) == Just (transpose (shrink $ 1 * 2 * 3))" $
    toAcyclic (AM.clique [3,2,1]) == Just (transpose (shrink $ 1 * 2 * 3 :: AAI))
  test "toAcyclic (circuit [1,2,3]) == Nothing" $
    toAcyclic (AM.circuit [1,2,3 :: Int]) == Nothing
  test "toAcyclic . fromAcyclic == Just" $ \(x :: AAI) ->
    (toAcyclic . fromAcyclic) x == Just x

  putStrLn "\n============ Acyclic.AdjacencyMap.toAcyclicOrd ============"
  test "toAcyclicOrd empty == empty" $
    toAcyclicOrd AM.empty == (empty :: AAI)
  test "toAcyclicOrd . vertex == vertex" $ \(x :: Int) ->
    (toAcyclicOrd . AM.vertex) x == vertex x
  test "toAcyclicOrd (1 + 2) == shrink (1 + 2)" $
    toAcyclicOrd (1 + 2) == (shrink $ 1 + 2 :: AAI)
  test "toAcyclicOrd (1 * 2) == shrink (1 * 2)" $
    toAcyclicOrd (1 * 2) == (shrink $ 1 * 2 :: AAI)
  test "toAcyclicOrd (2 * 1) == shrink (1 + 2)" $
    toAcyclicOrd (2 * 1) == (shrink $ 1 + 2 :: AAI)
  test "toAcyclicOrd (1 * 2 * 1) == shrink (1 * 2)" $
    toAcyclicOrd (1 * 2 * 1) == (shrink $ 1 * 2 :: AAI)
  test "toAcyclicOrd (1 * 2 * 3) == shrink (1 * 2 * 3)" $
    toAcyclicOrd (1 * 2 * 3) == (shrink $ 1 * 2 * 3 :: AAI)

  putStrLn "\n============ Acyclic.AdjacencyMap.shrink ============"
  test "shrink . AM.vertex == vertex" $ \x ->
    (shrink . AM.vertex) x == (vertex x :: AAI)
  test "shrink . AM.vertices == vertices" $ \x ->
    (shrink . AM.vertices) x == (vertices x :: AAI)
  test "shrink . fromAcyclic == id" $ \(x :: AAI) ->
    (shrink . fromAcyclic) x == id x

  putStrLn "\n============ Acyclic.AdjacencyMap.consistent ============"
  -- 'consistent' checks the internal invariant of the acyclic wrapper;
  -- every constructor and combinator must preserve it.
  test "Arbitrary"         $ \(x :: AAI) -> consistent x
  test "empty"             $ consistent (empty :: AAI)
  test "vertex"            $ \(x :: Int) -> consistent (vertex x)
  test "vertices"          $ \(xs :: [Int]) -> consistent (vertices xs)
  test "union"             $ \(x :: AAI) (y :: AAI) -> consistent (union x y)
  test "join"              $ \(x :: AAI) (y :: AAI) -> consistent (join x y)
  test "transpose"         $ \(x :: AAI) -> consistent (transpose x)
  test "box"               $ size10 $ \(x :: AAI) (y :: AAI) -> consistent (box x y)
  test "transitiveClosure" $ \(x :: AAI) -> consistent (transitiveClosure x)
  test "scc"               $ \(x :: AI) -> consistent (scc x)
  test "toAcyclic"         $ \(x :: AI) -> fmap consistent (toAcyclic x) /= Just False
  test "toAcyclicOrd"      $ \(x :: AI) -> consistent (toAcyclicOrd x)
| snowleopard/alga | test/Algebra/Graph/Test/Acyclic/AdjacencyMap.hs | mit | 23,251 | 0 | 16 | 7,821 | 6,872 | 3,393 | 3,479 | -1 | -1 |
{-# LANGUAGE TypeFamilies, TypeOperators #-}
{-# LANGUAGE GADTs, DataKinds, PolyKinds #-}
{-# LANGUAGE TypeSynonymInstances, FlexibleContexts, FlexibleInstances, UndecidableInstances #-}
module Singletons where
import Data.Type.Equality
-- | Data family of singleton types: @Sing a@ mirrors the type-level
-- value @a@ with a unique term-level witness.
data family Sing (a :: k) :: *

-- | Types whose singleton witness can be summoned implicitly.
class SingI (a :: k) where
  sing :: Sing a
-- | Unary (Peano) naturals, promoted to the kind level via DataKinds.
data Nat = Z | S Nat
  deriving (Eq, Ord, Show)

-- | Singleton naturals, specialised from the 'Sing' family.
type SNat = (Sing :: Nat -> *)

data instance Sing (n :: Nat) where
  SZ :: SNat Z
  SS :: SNat n -> SNat (S n)

instance SingI Z where
  sing = SZ

instance SingI n => SingI (S n) where
  sing = SS sing
-- | Decide propositional equality of two singleton naturals; a
-- successful match on the recursive result rebuilds 'Refl' at the
-- successor type.
instance TestEquality SNat where
  testEquality SZ SZ = Just Refl
  testEquality (SS n) (SS m) =
    case testEquality n m of
      Just Refl -> Just Refl
      Nothing   -> Nothing
  testEquality _ _ = Nothing
-- | Singleton lists: one 'Sing' witness per element, mirroring the
-- promoted list structure.
type SList = (Sing :: [a] -> *)

data instance Sing (l :: [a]) where
  SNil :: SList '[]
  SCons :: Sing a -> SList l -> SList (a ': l)
-- | Equality of singleton lists, decided element-wise given decidable
-- equality of the element singletons.  The second comparison is only
-- demanded when the heads already match.
instance TestEquality (Sing :: a -> *) => TestEquality (Sing :: [a] -> *) where
  testEquality SNil SNil = Just Refl
  testEquality (SCons h1 t1) (SCons h2 t2) =
    case testEquality h1 h2 of
      Nothing   -> Nothing
      Just Refl -> case testEquality t1 t2 of
        Nothing   -> Nothing
        Just Refl -> Just Refl
  testEquality _ _ = Nothing
| vladfi1/hs-misc | Singletons.hs | mit | 1,155 | 4 | 10 | 266 | 455 | 235 | 220 | 35 | 0 |
module Cmm.Cmm where
import Cmm.CmmNode
| C-Elegans/linear | Cmm/Cmm.hs | mit | 40 | 0 | 4 | 5 | 11 | 7 | 4 | 2 | 0 |
{-# htermination enumFromThenTo :: Bool -> Bool -> Bool -> [Bool] #-}
| ComputationWithBoundedResources/ara-inference | doc/tpdb_trs/Haskell/full_haskell/Prelude_enumFromThenTo_7.hs | mit | 70 | 0 | 2 | 12 | 3 | 2 | 1 | 1 | 0 |
-- |
-- Module : Data.Edison.Coll.SkewHeap
-- Copyright : Copyright (c) 1998-1999, 2008 Chris Okasaki
-- License : MIT; see COPYRIGHT file for terms and conditions
--
-- Maintainer : robdockins AT fastmail DOT fm
-- Stability : stable
-- Portability : GHC, Hugs (MPTC and FD)
--
-- Skew heaps.
--
-- /References:/
--
-- * Daniel Sleator and Robert Tarjan. \"Self-Adjusting Heaps\".
-- /SIAM Journal on Computing/, 15(1):52-69, February 1986.
module Data.Edison.Coll.SkewHeap (
-- * Type of skew heaps
Heap, -- instance of Coll/CollX, OrdColl/OrdCollX
-- * CollX operations
empty,singleton,fromSeq,insert,insertSeq,union,unionSeq,delete,deleteAll,
deleteSeq,null,size,member,count,strict,structuralInvariant,
-- * Coll operations
toSeq, lookup, lookupM, lookupAll, lookupWithDefault, fold, fold',
fold1, fold1', filter, partition, strictWith,
-- * OrdCollX operations
deleteMin,deleteMax,unsafeInsertMin,unsafeInsertMax,unsafeFromOrdSeq,
unsafeAppend,filterLT,filterLE,filterGT,filterGE,partitionLT_GE,
partitionLE_GT,partitionLT_GT,
-- * OrdColl operations
minView,minElem,maxView,maxElem,foldr,foldr',foldl,foldl',
foldr1,foldr1',foldl1,foldl1',toOrdSeq,
unsafeMapMonotonic,
-- * Documentation
moduleName
) where
import Prelude hiding (null,foldr,foldl,foldr1,foldl1,lookup,filter)
import qualified Data.Edison.Coll as C
import qualified Data.Edison.Seq as S
import Data.Edison.Coll.Defaults
import Data.Monoid
import Data.Semigroup as SG
import Control.Monad
import Test.QuickCheck
-- | Fully-qualified name of this module, used in error messages.
moduleName :: String
moduleName = "Data.Edison.Coll.SkewHeap"
-- | A skew heap: empty, or a root element with two sub-heaps.
data Heap a = E | T a (Heap a) (Heap a)

-- invariants:
--   * Heap order: every node's element is <= all elements below it.

-- | Check the heap-order invariant on every path from the root.
structuralInvariant :: Ord a => Heap a -> Bool
structuralInvariant E = True
structuralInvariant t@(T x _ _) = isMin x t
  where isMin _ E = True
        isMin x (T y l r) = x <= y && isMin y l && isMin y r
{-
For delete,deleteAll,filter,partition: could compute fringe and reduce
rather that rebuilding with union at every deleted node
-}
-- | O(1).  The empty heap.
empty :: Ord a => Heap a
empty = E

-- | O(1).  A heap containing exactly one element.
singleton :: Ord a => a -> Heap a
singleton x = T x E E
-- | Insert an element.  A new minimum becomes the root with the old
-- heap as its left child; otherwise the element sinks into the right
-- child and the children are swapped (the "skew" step).
insert :: Ord a => a -> Heap a -> Heap a
insert x E = T x E E
insert x heap@(T y l r) =
  case compare x y of
    GT -> T y (insert x r) l
    _  -> T x heap E
-- | Merge two skew heaps, amortized O(log n).  The smaller root wins
-- and its children are swapped on the way down, which yields the skew
-- heap's amortized balance — do not change the argument order of the
-- recursive calls.
union :: Ord a => Heap a -> Heap a -> Heap a
union E h = h
union h@(T x a b) h' = union' h x a b h'
  where union' h _ _ _ E = h
        union' hx x a b hy@(T y c d)
          | x <= y    = T x (union' hy y c d b) a
          | otherwise = T y (union' hx x a b d) c
-- | Delete a single occurrence of the element (no-op when absent).
-- The local helper returns 'Nothing' when the element does not occur in
-- a subtree, so untouched paths keep their original sharing; heap order
-- lets the LT case cut the search off early.
delete :: Ord a => a -> Heap a -> Heap a
delete x h = case del h of
               Just h' -> h'
               Nothing -> h
  where del (T y a b) =
          case compare x y of
            LT -> Nothing
            EQ -> Just (union a b)
            GT -> case del b of
                    Just b' -> Just (T y a b')
                    Nothing -> case del a of
                                 Just a' -> Just (T y a' b)
                                 Nothing -> Nothing
        del E = Nothing
-- | Remove every occurrence of the element.  By heap order, a root
-- greater than the target means the target cannot occur below it.
deleteAll :: Ord a => a -> Heap a -> Heap a
deleteAll _ E = E
deleteAll x h@(T y l r)
  | x < y     = h
  | x == y    = union (deleteAll x l) (deleteAll x r)
  | otherwise = T y (deleteAll x l) (deleteAll x r)
-- | O(1).  Is the heap empty?
null :: Ord a => Heap a -> Bool
null E = True
null _ = False

-- | O(n).  Count all nodes with an accumulator-style traversal.
size :: Ord a => Heap a -> Int
size h = sz h 0
  where sz E i = i
        sz (T _ a b) i = sz a (sz b (i + 1))
-- | Membership test.  Heap order prunes subtrees whose root already
-- exceeds the target.
member :: Ord a => a -> Heap a -> Bool
member _ E = False
member x (T y l r)
  | x < y     = False
  | x == y    = True
  | otherwise = member x r || member x l
-- | Count occurrences of an element, pruning via heap order.
count :: Ord a => a -> Heap a -> Int
count _ E = 0
count x (T y l r)
  | x < y     = 0
  | x == y    = 1 + count x r + count x l
  | otherwise = count x r + count x l
-- | List all elements (unordered) into a sequence, accumulating from
-- the right so each element is prepended exactly once.
toSeq :: (Ord a,S.Sequence seq) => Heap a -> seq a
toSeq heap = go heap S.empty
  where go E acc = acc
        go (T x l r) acc = S.lcons x (go r (go l acc))
-- | Find an element equal to the argument, failing in the given monad
-- when absent.  The inner search instantiates 'lookupM' at the 'Maybe'
-- monad (the Nothing/Just case forces it), and only the final result is
-- re-wrapped in @m@.
-- NOTE(review): uses 'fail' under a plain 'Monad' constraint — on
-- modern GHC this needs MonadFail; confirm the supported GHC range.
lookupM :: (Ord a, Monad m) => a -> Heap a -> m a
lookupM _ E = fail "SkewHeap.lookupM: XXX"
lookupM x (T y a b) =
  case compare x y of
    LT -> fail "SkewHeap.lookupM: XXX"
    EQ -> return y
    GT -> case lookupM x b `mplus` lookupM x a of
            Nothing -> fail "SkewHeap.lookupM: XXX"
            Just x -> return x
-- | Collect every stored element equal to the argument, pruning
-- subtrees whose root already exceeds it.
lookupAll :: (Ord a,S.Sequence seq) => a -> Heap a -> seq a
lookupAll x h = gather h S.empty
  where gather E acc = acc
        gather (T y l r) acc
          | x < y     = acc
          | x == y    = S.lcons y (gather r (gather l acc))
          | otherwise = gather r (gather l acc)
-- | Lazy unordered fold over all elements.
fold :: Ord a => (a -> b -> b) -> b -> Heap a -> b
fold _ e E = e
fold f e (T x a b) = f x (fold f (fold f e a) b)

-- | Strict variant of 'fold': the accumulator is forced at each step.
fold' :: Ord a => (a -> b -> b) -> b -> Heap a -> b
fold' _ e E = e
fold' f e (T x a b) = e `seq` f x $! (fold' f (fold' f e a) b)

-- | Unordered fold over a non-empty heap; errors on the empty heap.
fold1 :: Ord a => (a -> a -> a) -> Heap a -> a
fold1 _ E = error "SkewHeap.fold1: empty collection"
fold1 f (T x a b) = fold f (fold f x a) b

-- | Strict variant of 'fold1'; errors on the empty heap.
fold1' :: Ord a => (a -> a -> a) -> Heap a -> a
fold1' _ E = error "SkewHeap.fold1': empty collection"
fold1' f (T x a b) = fold' f (fold' f x a) b
-- | Keep only elements satisfying the predicate.  When a root is
-- dropped its filtered children are re-merged with 'union'.
filter :: Ord a => (a -> Bool) -> Heap a -> Heap a
filter _ E = E
filter keep (T x l r)
  | keep x    = T x keptL keptR
  | otherwise = union keptL keptR
  where keptL = filter keep l
        keptR = filter keep r
-- | Split the heap into (elements satisfying p, elements failing p);
-- the half that loses the root re-merges its pieces with 'union'.
partition :: Ord a => (a -> Bool) -> Heap a -> (Heap a, Heap a)
partition _ E = (E, E)
partition p (T x a b)
  | p x       = (T x a' b', union a'' b'')
  | otherwise = (union a' b', T x a'' b'')
  where (a', a'') = partition p a
        (b', b'') = partition p b
-- | Drop the minimum (the root) by merging its children; the empty
-- heap is returned unchanged.
deleteMin :: Ord a => Heap a -> Heap a
deleteMin E = E
deleteMin (T _ a b) = union a b
-- | Drop the maximum element; the empty heap is returned unchanged.
-- 'maxView' is instantiated at the 'Maybe' monad.
deleteMax :: Ord a => Heap a -> Heap a
deleteMax = maybe E snd . maxView
-- | Insert an element assumed to be <= every element already in the
-- heap (precondition not checked): it simply becomes the new root.
unsafeInsertMin :: Ord a => a -> Heap a -> Heap a
unsafeInsertMin x h = T x h E

-- | Append assuming every element of the first heap is <= every element
-- of the second (precondition not checked).  Children are swapped on
-- the way down, matching the skew 'union' discipline.
unsafeAppend :: Ord a => Heap a -> Heap a -> Heap a
unsafeAppend E h = h
unsafeAppend (T x a b) h = T x (unsafeAppend b h) a
-- | Keep only elements strictly below the bound; heap order lets the
-- search stop as soon as a root fails the test.
filterLT :: Ord a => a -> Heap a -> Heap a
filterLT bound heap = case heap of
  T x l r | x < bound -> T x (filterLT bound l) (filterLT bound r)
  _                   -> E

-- | Keep only elements at or below the bound (see 'filterLT').
filterLE :: Ord a => a -> Heap a -> Heap a
filterLE bound heap = case heap of
  T x l r | x <= bound -> T x (filterLE bound l) (filterLE bound r)
  _                    -> E
-- | Keep only elements strictly above the bound.  Any subtree whose
-- root passes is already entirely above the bound (heap order), so
-- whole subtrees are collected and merged with 'C.unionList'.
filterGT :: Ord a => a -> Heap a -> Heap a
filterGT y h = C.unionList (collect h [])
  where collect E hs = hs
        collect h@(T x a b) hs
          | x > y     = h : hs
          | otherwise = collect a (collect b hs)

-- | Keep only elements at or above the bound (same scheme as
-- 'filterGT'; note the children are visited in the opposite order).
filterGE :: Ord a => a -> Heap a -> Heap a
filterGE y h = C.unionList (collect h [])
  where collect E hs = hs
        collect h@(T x a b) hs
          | x >= y    = h : hs
          | otherwise = collect b (collect a hs)
-- | Split into (elements < y, elements >= y).  Subtrees entirely >= y
-- are collected whole (heap order) and merged with 'C.unionList'; the
-- accumulator threads the collected subtrees through the traversal.
partitionLT_GE :: Ord a => a -> Heap a -> (Heap a, Heap a)
partitionLT_GE y h = (h', C.unionList hs)
  where (h', hs) = collect h []
        collect E hs = (E, hs)
        collect h@(T x a b) hs
          | x >= y    = (E, h:hs)
          | otherwise = let (a', hs') = collect a hs
                            (b', hs'') = collect b hs'
                        in (T x a' b', hs'')

-- | Split into (elements <= y, elements > y); same scheme as
-- 'partitionLT_GE' with a strict bound.
partitionLE_GT :: Ord a => a -> Heap a -> (Heap a, Heap a)
partitionLE_GT y h = (h', C.unionList hs)
  where (h', hs) = collect h []
        collect E hs = (E, hs)
        collect h@(T x a b) hs
          | x > y     = (E, h:hs)
          | otherwise = let (a', hs') = collect a hs
                            (b', hs'') = collect b hs'
                        in (T x a' b', hs'')

-- | Split into (elements < y, elements > y), discarding elements equal
-- to y: the EQ branch drops the root and re-merges its children.
partitionLT_GT :: Ord a => a -> Heap a -> (Heap a, Heap a)
partitionLT_GT y h = (h', C.unionList hs)
  where (h', hs) = collect h []
        collect E hs = (E, hs)
        collect h@(T x a b) hs =
          case compare x y of
            GT -> (E, h:hs)
            EQ -> let (a', hs') = collect a hs
                      (b', hs'') = collect b hs'
                  in (union a' b', hs'')
            LT -> let (a', hs') = collect a hs
                      (b', hs'') = collect b hs'
                  in (T x a' b', hs'')
-- | O(1) access to the minimum plus the rest of the heap.
-- NOTE(review): uses 'fail' under a plain 'Monad' constraint — needs
-- MonadFail on modern GHC; confirm the supported GHC range.
minView :: (Ord a, Monad m) => Heap a -> m (a, Heap a)
minView E = fail "SkewHeap.minView: empty heap"
minView (T x a b) = return (x, union a b)

-- | O(1).  The minimum element (the root); errors on the empty heap.
minElem :: Ord a => Heap a -> a
minElem E = error "SkewHeap.minElem: empty collection"
minElem (T x _ _) = x
-- | O(n) worst case: the maximum lives at some leaf, so both subtrees
-- may be searched.  The lazy @Just@ where-bindings are safe because the
-- matched subtree is non-empty, making the recursive 'maxView' (at the
-- Maybe monad) always succeed.
maxView :: (Ord a, Monad m) => Heap a -> m (a, Heap a)
maxView E = fail "SkewHeap.maxView: empty heap"
maxView (T x E E) = return (x, E)
maxView (T x a E) = return (y, T x a' E)
  where Just (y, a') = maxView a
maxView (T x E a) = return (y, T x a' E)
  where Just (y, a') = maxView a
maxView (T x a b)
  | y >= z    = return (y, T x a' b)
  | otherwise = return (z, T x a b')
  where Just (y, a') = maxView a
        Just (z, b') = maxView b
-- warning: maxView and maxElem may disagree if root is equal to max!

-- | O(n).  The maximum element; errors on the empty heap.  'findLeaf'
-- seeds the running maximum with some leaf, then 'findMax' scans the
-- remaining leaves (only leaves can hold the maximum in a heap).
maxElem :: Ord a => Heap a -> a
maxElem E = error "SkewHeap.maxElem: empty collection"
maxElem (T x E E) = x
maxElem (T _ a E) = maxElem a
maxElem (T _ E a) = maxElem a
maxElem (T _ a b) = findMax b (findLeaf a)
  where findMax E m = m
        findMax (T x E E) m
          | m >= x    = m
          | otherwise = x
        findMax (T _ a E) m = findMax a m
        findMax (T _ E a) m = findMax a m
        findMax (T _ a b) m = findMax a (findMax b m)

        findLeaf E = error "SkewHeap.maxElem: bug"
        findLeaf (T x E E) = x
        findLeaf (T _ a E) = findLeaf a
        findLeaf (T _ E a) = findLeaf a
        findLeaf (T _ a b) = findMax b (findLeaf a)
-- | Fold in ascending order: each step removes the minimum via 'union'
-- of the root's children, so these folds cost O(n log n).
foldr :: Ord a => (a -> b -> b) -> b -> Heap a -> b
foldr _ e E = e
foldr f e (T x a b) = f x (foldr f e (union a b))

-- | Strict variant of 'foldr'.
foldr' :: Ord a => (a -> b -> b) -> b -> Heap a -> b
foldr' _ e E = e
foldr' f e (T x a b) = e `seq` f x $! (foldr' f e (union a b))

-- | Left fold in ascending order.
foldl :: Ord a => (b -> a -> b) -> b -> Heap a -> b
foldl _ e E = e
foldl f e (T x a b) = foldl f (f e x) (union a b)

-- | Strict variant of 'foldl'.
foldl' :: Ord a => (b -> a -> b) -> b -> Heap a -> b
foldl' _ e E = e
foldl' f e (T x a b) = e `seq` foldl' f (f e x) (union a b)

-- | 'foldr' over a non-empty heap; errors on the empty heap.
foldr1 :: Ord a => (a -> a -> a) -> Heap a -> a
foldr1 _ E = error "SkewHeap.foldr1: empty collection"
foldr1 _ (T x E E) = x
foldr1 f (T x a b) = f x (foldr1 f (union a b))

-- | Strict variant of 'foldr1'.
foldr1' :: Ord a => (a -> a -> a) -> Heap a -> a
foldr1' _ E = error "SkewHeap.foldr1': empty collection"
foldr1' _ (T x E E) = x
foldr1' f (T x a b) = f x $! (foldr1' f (union a b))

-- | 'foldl' over a non-empty heap; errors on the empty heap.
foldl1 :: Ord a => (a -> a -> a) -> Heap a -> a
foldl1 _ E = error "SkewHeap.foldl1: empty collection"
foldl1 f (T x a b) = foldl f x (union a b)

-- | Strict variant of 'foldl1'.
foldl1' :: Ord a => (a -> a -> a) -> Heap a -> a
foldl1' _ E = error "SkewHeap.foldl1': empty collection"
foldl1' f (T x a b) = foldl' f x (union a b)
{- unsafeMapMonotonic applies a function to every element without
   re-establishing heap order, so it is only safe for monotonic
   functions -- hence the "unsafe" prefix. -}
-- | Apply @f@ to every element in place, without re-heapifying.
-- Only safe when @f@ is monotonic; otherwise the heap order breaks.
unsafeMapMonotonic :: Ord a => (a -> a) -> Heap a -> Heap a
unsafeMapMonotonic f = go
  where
    go E         = E
    go (T x l r) = T (f x) (go l) (go r)
-- | Force the spine of the heap (element values remain lazy).
strict :: Heap a -> Heap a
strict h@E = h
strict h@(T _ l r) = strict l `seq` strict r `seq` h

-- | Force the spine and evaluate @f x@ to WHNF for every element,
-- e.g. @strictWith id@ forces the elements themselves.
strictWith :: (a -> b) -> Heap a -> Heap a
strictWith _ h@E = h
strictWith f h@(T x l r) = f x `seq` strictWith f l `seq` strictWith f r `seq` h
-- the remaining functions all use default definitions

-- | Build a heap from any sequence.
fromSeq :: (Ord a,S.Sequence seq) => seq a -> Heap a
fromSeq = fromSeqUsingUnionSeq

-- | Insert every element of a sequence.
insertSeq :: (Ord a,S.Sequence seq) => seq a -> Heap a -> Heap a
insertSeq = insertSeqUsingUnion

-- | Merge a sequence of heaps into one.
unionSeq :: (Ord a,S.Sequence seq) => seq (Heap a) -> Heap a
unionSeq = unionSeqUsingReduce

-- | Delete one occurrence of each element of the sequence.
deleteSeq :: (Ord a,S.Sequence seq) => seq a -> Heap a -> Heap a
deleteSeq = deleteSeqUsingDelete

-- | Partial: return the stored element equal to the argument.
lookup :: Ord a => a -> Heap a -> a
lookup = lookupUsingLookupM

-- | Like 'lookup' but returning the given default when absent.
lookupWithDefault :: Ord a => a -> a -> Heap a -> a
lookupWithDefault = lookupWithDefaultUsingLookupM

-- | Insert an element assumed to be >= every current element.
unsafeInsertMax :: Ord a => a -> Heap a -> Heap a
unsafeInsertMax = unsafeInsertMaxUsingUnsafeAppend

-- | Build from a sequence assumed to be in ascending order.
unsafeFromOrdSeq :: (Ord a,S.Sequence seq) => seq a -> Heap a
unsafeFromOrdSeq = unsafeFromOrdSeqUsingUnsafeInsertMin

-- | Convert to a sequence in ascending order.
toOrdSeq :: (Ord a,S.Sequence seq) => Heap a -> seq a
toOrdSeq = toOrdSeqUsingFoldr
-- instance declarations

-- | Basic (unobservable) collection operations.
instance Ord a => C.CollX (Heap a) a where
  {singleton = singleton; fromSeq = fromSeq; insert = insert;
   insertSeq = insertSeq; unionSeq = unionSeq;
   delete = delete; deleteAll = deleteAll; deleteSeq = deleteSeq;
   null = null; size = size; member = member; count = count;
   strict = strict;
   structuralInvariant = structuralInvariant; instanceName _ = moduleName}

-- | Ordered-collection operations (min/max deletion, range filters).
instance Ord a => C.OrdCollX (Heap a) a where
  {deleteMin = deleteMin; deleteMax = deleteMax;
   unsafeInsertMin = unsafeInsertMin; unsafeInsertMax = unsafeInsertMax;
   unsafeFromOrdSeq = unsafeFromOrdSeq; unsafeAppend = unsafeAppend;
   filterLT = filterLT; filterLE = filterLE; filterGT = filterGT;
   filterGE = filterGE; partitionLT_GE = partitionLT_GE;
   partitionLE_GT = partitionLE_GT; partitionLT_GT = partitionLT_GT}

-- | Observable collection operations.
instance Ord a => C.Coll (Heap a) a where
  {toSeq = toSeq; lookup = lookup; lookupM = lookupM;
   lookupAll = lookupAll; lookupWithDefault = lookupWithDefault;
   fold = fold; fold' = fold'; fold1 = fold1; fold1' = fold1';
   filter = filter; partition = partition; strictWith = strictWith}
-- | Ordered observable-collection operations: the fold methods visit
-- elements in ascending order.
instance Ord a => C.OrdColl (Heap a) a where
  {minView = minView; minElem = minElem; maxView = maxView;
   maxElem = maxElem; foldr = foldr; foldr' = foldr';
   foldl = foldl; foldl' = foldl'; foldr1 = foldr1; foldr1' = foldr1';
   -- bug fix: foldl1' previously delegated to fold1', which folds in
   -- arbitrary order; OrdColl's foldl1' must fold in ascending order.
   foldl1 = foldl1; foldl1' = foldl1'; toOrdSeq = toOrdSeq;
   unsafeMapMonotonic = unsafeMapMonotonic}
-- | Heaps are equal when they contain the same multiset of elements,
-- compared via their sorted element lists.
instance Ord a => Eq (Heap a) where
  xs == ys = C.toOrdList xs == C.toOrdList ys

instance (Ord a, Show a) => Show (Heap a) where
  showsPrec = showsPrecUsingToList

instance (Ord a, Read a) => Read (Heap a) where
  readsPrec = readsPrecUsingFromList
-- | Size-bounded random heaps.  'sift' re-establishes the heap order
-- when placing a freshly generated root over two generated subtrees.
instance (Ord a, Arbitrary a) => Arbitrary (Heap a) where
  arbitrary = sized (\n -> arbTree n)
    where arbTree 0 = return E
          arbTree n =
            frequency [(1, return E),
                       (4, liftM3 sift arbitrary (arbTree (n `div` 2))
                                                 (arbTree (n `div` 4)))]
          -- sift x l r: push x down until the heap property holds
          sift x (T y a b) E
            | y < x = T y (sift x a b) E
          sift x E (T y a b)
            | y < x = T y E (sift x a b)
          sift x s@(T y a b) t@(T z c d)
            | y < x && y <= z = T y (sift x a b) t
            | z < x = T z s (sift x c d)
          -- x is <= both roots (or children are empty): x is the root
          sift x a b = T x a b
-- | Feed the heap's structure and elements into a QuickCheck generator.
instance (Ord a, CoArbitrary a) => CoArbitrary (Heap a) where
  coarbitrary E = variant 0
  coarbitrary (T x a b) =
      variant 1 . coarbitrary x . coarbitrary a . coarbitrary b

-- | '<>' is heap union.
instance (Ord a) => Semigroup (Heap a) where
  (<>) = union

-- | 'mempty' is the empty heap; 'mconcat' merges a whole list at once.
instance (Ord a) => Monoid (Heap a) where
  mempty = empty
  mappend = (SG.<>)
  mconcat = unionSeq

-- | Heaps are ordered by their sorted element lists.
instance (Ord a) => Ord (Heap a) where
  compare = compareUsingToOrdList
| robdockins/edison | edison-core/src/Data/Edison/Coll/SkewHeap.hs | mit | 14,726 | 0 | 18 | 4,381 | 6,991 | 3,560 | 3,431 | -1 | -1 |
{-# LANGUAGE PackageImports #-}
module Core.TypeCheck where
import Control.Applicative
import Control.Monad
import "mtl" Control.Monad.State
import Data.Either
import qualified Data.Map as M
import Data.Maybe
import Text.Printf
import Core.AST
import Core.PrettyPrinter
-- | Check types. Simple enough.
-- Seeds the environment from the top-level declarations, type-checks
-- every declaration, and 'error's with all logged messages (in the
-- order they occurred -- the log is stored most-recent-first).
typecheck :: Module -> Module
typecheck m = let emptyState = TypeCheck M.empty M.empty []
                  (m', state) = runState (initMod m >> processModule m) emptyState
              in case errors state of
                   [] -> m'
                   es -> error $ unlines $ reverse es

-- | Type-checker state: types of term-level names, kinds of type-level
-- names, and the reversed error log.
data TypeCheck = TypeCheck { typeMap :: M.Map String Type, kindMap :: M.Map String Kind, errors :: [String] }

type TypeCheckM a = State TypeCheck a
-- | Run an action inside its own inner scope: any 'typeMap'/'kindMap'
-- entries it adds are discarded afterwards (the error log is kept).
subScope :: TypeCheckM a -> TypeCheckM a
subScope action = do
  (savedTypes, savedKinds) <- gets (\st -> (typeMap st, kindMap st))
  result <- action
  modify (\st -> st { typeMap = savedTypes, kindMap = savedKinds })
  pure result
-- | Record the type of a term-level name in the current scope.
addType :: String -> Type -> TypeCheckM ()
addType key ty = modify (\st -> st { typeMap = M.insert key ty (typeMap st) })

-- | Record the kind of a type-level name in the current scope.
addKind :: String -> Kind -> TypeCheckM ()
addKind key ki = modify (\st -> st { kindMap = M.insert key ki (kindMap st) })
-- | Register an identifier in the appropriate map (type or kind).
addIdent :: Ident -> TypeCheckM ()
addIdent ident = case ident of
  Id n ty  -> addType n ty
  TyId n k -> addKind n k

-- | Look up the type of a term-level name in the current scope.
lookupType :: String -> TypeCheckM (Maybe Type)
lookupType key = gets (M.lookup key . typeMap)
-- | Append an error message to the log (stored most-recent-first;
-- 'typecheck' reverses it before printing).
reportError :: String -> TypeCheckM ()
reportError msg = modify (\st -> st { errors = msg : errors st })
-- | Extract types of top-level term identifiers and kinds of top-level
-- types, and add them to the state..
-- Also registers the built-in @dumpInt@ primitive.
initMod :: Module -> TypeCheckM ()
initMod (Module _ decls) = do
    addType "dumpInt" (TyInt ~> TyInt)
    forM_ decls $ \decl -> case decl of
        DTerm ident _ -> addIdent ident
        DType ident constrs -> do
            addIdent ident
            -- data constructors are term-level names too
            mapM_ addIdent constrs
-- | Type-check every declaration, preserving the module's name.
processModule :: Module -> TypeCheckM Module
processModule (Module name decls) = Module name <$> mapM processDecl decls
-- | Type-check a single top-level declaration.  A term declaration is
-- checked in its own scope against its declared type; a mismatch is
-- logged, and the declaration is returned unchanged either way.
processDecl :: Decl -> TypeCheckM Decl
processDecl decl@(DTerm ident expr) = do
    ty <- subScope $ calculateType expr
    -- idiomatic 'unless' instead of casing on a Bool
    unless (ty == typeOf ident) $
        reportError $ printf "Type mismatch in top-level decl. Expected %s, got %s." (ppType $ typeOf ident) (ppType ty)
    pure decl
processDecl (DType ident constructors) = pure $ (DType ident constructors) -- TODO: should probably check kinds here.
-- | Check that @expr@ has the identifier's declared type, logging a
-- mismatch.  Always returns the declared type so checking can continue.
-- NOTE: intentionally partial -- only term identifiers ('Id') may be
-- bound to expressions.
checkIdent :: Ident -> Expr Ident -> TypeCheckM Type
checkIdent (Id name ty1) expr = do
    ty2 <- calculateType expr
    -- idiomatic 'unless' instead of casing on a Bool
    unless (ty1 == ty2) $
        reportError $ printf "Type mismatch. Identifier has type %s but the expression has type %s." (ppType ty1) (ppType ty2)
    pure ty1
-- | Calculate the type of the given expression.
-- Recoverable mismatches are logged via 'reportError' and a best-guess
-- type is returned so checking continues; syntax-level problems 'error'.
calculateType :: Expr Ident -> TypeCheckM Type
calculateType (V ident) = do
    -- Lookup name in typeMap.
    mty <- lookupType $ name ident
    -- Make sure it's the same as the ident's.
    case mty of
        Just ty -> case ty == typeOf ident of
            True -> pure ty
            False -> do
                reportError $ printf "Type mismatch. Identifier has type %s but the value in scope has type %s." (ppType $ typeOf ident) (ppType ty)
                pure ty
        Nothing -> do
            reportError $ printf "Type lookup failed: %s." $ ppIdent ident
            pure $ typeOf ident
-- Literals are always integers.
calculateType (L lit) = pure TyInt
calculateType (l :@ r) = do
    lTy <- calculateType l
    case lTy of
        -- Term application.
        TyFun lTy2 rTy2 -> do
            rTy <- calculateType r
            case lTy2 == rTy of
                True -> pure rTy2
                False -> do
                    reportError $ printf "Type mismatch. Function expecting value of type %s but got type %s." (ppType lTy2) (ppType rTy)
                    pure $ rTy2
        -- Type application.
        TyForAll ident ty1 -> case r of
            Type ty2 -> do
                ki <- calculateKind ty2
                case kindOf ident == ki of
                    True -> pure $ applyType (TyForAll ident ty1) ty2
                    False -> do
                        reportError $ printf "kind mismatch. Type application expecting type of kind %s but got kind %s." (ppKind $ kindOf ident) (ppKind ki)
                        pure ty2
            expr -> error $ printf "Syntax error. Expected type for application, got other expression instead: %s" (ppExpr expr)
-- Term lambda: bind the argument, type the body, build an arrow.
calculateType (Lam ident@(Id name ty) expr) = subScope $ do
    addIdent ident
    ty2 <- calculateType expr
    pure $ ty ~> ty2
-- Type lambda (ident is a 'TyId'): the result is a forall.
calculateType (Lam ident expr) = subScope $ do
    addIdent ident
    ty2 <- calculateType expr
    pure $ TyForAll ident ty2
-- Let: check each binding against its identifier, then type the body.
calculateType (Let rec bindings expr) = subScope $ do
    forM_ bindings $ \(ident, bexpr) -> do
        addType (name ident) =<< checkIdent ident bexpr
    calculateType expr
-- Case: every alternative must have the annotated result type.
-- NOTE(review): the scrutinee expression itself is never type-checked
-- here -- confirm this is intentional.
calculateType (Case expr ty alts) = do
    forM_ alts $ \(tag, binders, body) -> subScope $ do
        mapM_ addIdent binders
        ty2 <- calculateType body
        unless (ty == ty2) $ reportError $ printf "Type mismatch. Case alt has type %s where %s is expected." (ppType ty2) (ppType ty)
    pure ty
-- Constructor application must be exactly saturated.
calculateType (Constr tag ty exprs)
    | length exprs == getTypeArity ty = do
        passed <- and <$> checkTypes ty exprs
        if passed then pure $ returnType ty else error "Type mismatch in data constructor."
    | length exprs > getTypeArity ty = error "Data constructor over-saturated."
    | length exprs < getTypeArity ty = error "Unsaturated data constructor."
    where
      -- Compare each argument type against the corresponding argument
      -- of the constructor's (curried) type, left to right.
      checkTypes :: Type -> [Expr Ident] -> TypeCheckM [Bool]
      checkTypes (TyFun l r) (v:vs) = do
          ety <- calculateType v
          rest <- checkTypes r vs
          pure $ (l == ety) : rest
      checkTypes ty' (v:[]) = do
          ety <- calculateType v
          pure $ (ty' == ety) : []
      checkTypes ty' [] = pure [] -- Nullary constructors.
      checkTypes ty' vs = error $ printf "Foo: %s -- %s" (show ty') (show vs)
calculateType (PrimFun pf) = pure $ getTypePF pf
calculateType (Type ty) = error $ printf "Found type (%s) where term was expected." $ ppType ty
-- | Types of primitive functions: every binary primitive is an
-- integer operator, Int -> Int -> Int.
getTypePF :: PrimFun -> Type
getTypePF (PrimBinOp _) = TyInt ~> TyInt ~> TyInt
-- | Calculate the kind of a type.
-- Arrows and Int are proper types (*); variables carry their own kind;
-- foralls have the kind of their body; applications check the operand
-- kind against the operator's arrow kind.
calculateKind :: Type -> TypeCheckM Kind
calculateKind (TyFun l r) = pure KiStar
calculateKind (TyInt) = pure KiStar
calculateKind (TyVar ident) = pure $ kindOf ident
calculateKind (TyForAll ident ty) = calculateKind ty
calculateKind (TyAp tyL tyR) = do
    kiL <- calculateKind tyL
    case kiL of
        -- NOTE(review): this treats @KiFun l r@ as accepting an operand
        -- of kind @r@ and producing @l@ -- confirm against the Kind
        -- AST's argument/result order.
        KiFun l r -> do
            kiR <- calculateKind tyR
            case kiR == r of
                True -> pure l
                False -> do
                    reportError $ printf "Kind mismatch. Applying type of kind %s where %s is expected." (ppKind kiR) (ppKind r)
                    pure r
        kind -> do
            reportError $ printf "Attempting to apply type of kind %s to concrete type %s." (ppKind kind) (ppType tyL)
            pure kind
| tcsavage/lazy-compiler | src/Core/TypeCheck.hs | mit | 7,477 | 0 | 24 | 2,126 | 2,315 | 1,107 | 1,208 | 158 | 11 |
{-# LANGUAGE ScopedTypeVariables #-}
--module JobServer (initializeJobServer, getJobServer, clearJobServer, runJobs, runJob, waitOnJobs,
-- printJobServerHandle, JobServerHandle, tryWaitOnJobs) where
import Control.Concurrent (newEmptyMVar, putMVar, takeMVar, tryTakeMVar, MVar, threadDelay, forkOS)
import Control.Exception.Base (assert)
import Control.Exception (catch, SomeException(..))
import Foreign.C.Types (CInt)
import System.Environment (getEnv, setEnv)
import System.Posix.IO (createPipe, fdWrite, fdRead, FdOption(..), setFdOption, closeFd)
import System.Posix.Types (Fd(..), ByteCount)
import System.IO (hPutStrLn, stderr)
-- | A job server: the read and write ends of the token pipe, plus the
-- result slots of jobs forked so far.
newtype JobServerHandle a = JobServerHandle { unJobServerHandle :: (Fd, Fd, [MVar a]) } deriving (Show)

-- | A single-byte job-slot token read from the pipe.
newtype Token = Token { unToken :: String } deriving (Eq, Show)

-- Empty instance so JobServerHandle's derived Show compiles.
-- NOTE(review): no methods are defined, so actually showing an MVar
-- would not produce useful output -- confirm this is debug-only.
instance Show (MVar a)
-- | Create a job server allowing at most @n@ simultaneous jobs: a pipe
-- pre-loaded with @n-1@ tokens (the caller implicitly holds the n-th
-- slot).  The pipe fds are advertised via the MAKEFLAGS variable.
initializeJobServer :: Int -> IO (JobServerHandle a)
initializeJobServer n = do
  -- Create the pipe:
  (readEnd, writeEnd) <- createPipe
  assert_ $ readEnd >= 0
  assert_ $ writeEnd >= 0
  assert_ $ readEnd /= writeEnd
  -- Make the read end of the pipe non-blocking:
  setFdOption readEnd NonBlockingRead True
  -- Write the tokens to the pipe:
  byteCount <- fdWrite writeEnd tokens
  assert_ $ countToInt byteCount == tokensToWrite
  -- Set an environment variable to store the handle for
  -- other programs that might use this server:
  setEnv "MAKEFLAGS" $ show readEnd ++ ", " ++ show writeEnd
  -- Return the read and write ends of the pipe:
  return $ JobServerHandle (readEnd, writeEnd, [])
  where tokens = concat $ map show $ take tokensToWrite [(1::Integer)..]
        -- NOTE(review): tokens 10.. serialize to more than one byte,
        -- while getToken reads single bytes -- confirm small n assumed.
        tokensToWrite = n-1
-- | Re-create a handle from the MAKEFLAGS environment variable set by
-- 'initializeJobServer' (format: "<readFd>, <writeFd>").
getJobServer :: IO (JobServerHandle a)
getJobServer = do flags <- getEnv "MAKEFLAGS"
                  let handle = handle' flags
                  return $ JobServerHandle $ (Fd $ handle !! 0, Fd $ handle !! 1, [])
  where handle' flags = map convert (splitBy ',' flags)
        -- 'read' skips leading whitespace, so " 4" parses; it will
        -- crash on a malformed MAKEFLAGS value.
        convert a = read a :: CInt
-- | Close both ends of the pipe, ignoring any exception (e.g. an
-- already-closed fd).
clearJobServer :: JobServerHandle a -> IO ()
clearJobServer handle = safeCloseFd w >> safeCloseFd r
  where safeCloseFd fd = catch (closeFd fd) (\(_ :: SomeException) -> return ())
        (r, w, _) = unJobServerHandle handle
-- Given a list of IO () jobs, run them when a space on the job server is
-- available.

-- | Run jobs with bounded parallelism: each job either grabs a token
-- and runs in a forked thread, or runs inline on the current thread
-- when no token is free.  Results keep the input-list order.
runJobs :: JobServerHandle a -> [IO a] -> IO [a]
runJobs _ [] = return []
runJobs _ [j] = do ret <- j
                   return [ret]
runJobs handle (j:jobs) = maybe runJob forkJob =<< getToken r
  where
    (r, w, _) = unJobServerHandle handle
    -- A token was available: fork the job and recurse on the rest.
    forkJob token = do
      --putStrLn $ "read " ++ unToken token ++ " from pipe. "
      -- Fork new thread to run job:
      mToken <- newEmptyMVar
      mReturn <- newEmptyMVar
      --putStrLn $ "fork process " ++ unToken token
      -- consider using fork finally
      -- consider putting thread id in handle so that it can be killed on error
      _ <- forkOS $ runForkedJob mToken mReturn w j
      putMVar mToken token
      -- Run the rest of the jobs:
      rets <- runJobs handle jobs
      -- Wait on my forked job:
      --putStrLn $ "waiting on " ++ unToken token
      ret1 <- takeMVar mReturn
      return $ ret1:rets
      --putStrLn $ "reaped " ++ unToken returnedToken
    -- No token: run this job synchronously, then the rest.
    runJob = do ret1 <- j
                rets <- runJobs handle jobs
                return $ ret1:rets
-- | Single-job variant of 'runJobs': fork the job if a token is
-- available, recording its result slot in the returned handle;
-- otherwise run it inline and pre-fill a fresh slot with the result.
runJob :: JobServerHandle a -> IO a -> IO (JobServerHandle a)
runJob handle j = maybe runJob forkJob =<< getToken r
  where
    (r, w, mReturns) = unJobServerHandle handle
    forkJob token = do
      --putStrLn $ "read " ++ unToken token ++ " from pipe. "
      -- Fork new thread to run job:
      mToken <- newEmptyMVar
      mReturn <- newEmptyMVar
      --putStrLn $ "fork process " ++ unToken token
      -- consider using fork finally
      _ <- forkOS $ runForkedJob mToken mReturn w j
      putMVar mToken token
      return $ JobServerHandle (r, w, mReturns++[mReturn])
    -- No token: run synchronously and store the finished result.
    runJob = do ret <- j
                mReturn <- newEmptyMVar
                putMVar mReturn ret
                return $ JobServerHandle (r, w, mReturns++[mReturn])
-- | Debug helper: print the pipe fds and the number of pending slots.
printJobServerHandle :: JobServerHandle a -> IO ()
printJobServerHandle h =
  putStrLn $ concat ["handle: (", show readFd, ", ", show writeFd, ", len ", show (length pending), ")"]
  where (readFd, writeFd, pending) = unJobServerHandle h
-- | Body of a forked worker: wait to be handed its token, run the job,
-- return the token to the pipe, then publish the result.
runForkedJob :: MVar (Token) -> MVar (a) -> Fd -> IO a -> IO ()
runForkedJob mToken mReturn w job = do
  token <- takeMVar mToken
  --putStrLn $ "-- starting job with token: " ++ unToken token
  ret <- job
  --putStrLn $ "-- finished job with token: " ++ unToken token
  -- Return the token:
  returnToken w token
  -- Signal that I have finished:
  putMVar mReturn ret
  return ()
-- Wait on job and return a list of the job's return once all job's have finished:

-- | Block until every job recorded in the handle has finished.
waitOnJobs :: JobServerHandle a -> IO [a]
waitOnJobs handle = mapM takeMVar mReturns
  where (_, _, mReturns) = unJobServerHandle handle

-- Collect the return values of any jobs who have finished. Returns Nothing for
-- outstanding jobs:
tryWaitOnJobs :: JobServerHandle a -> IO [Maybe a]
tryWaitOnJobs handle = mapM tryTakeMVar mReturns
  where (_, _, mReturns) = unJobServerHandle handle
-- Get a token if one is available, otherwise return Nothing:
-- (the read end is non-blocking, so reading an empty pipe raises and
-- the handler maps any exception to Nothing)
getToken :: Fd -> IO (Maybe Token)
getToken fd = catch (readPipe) (\(_ :: SomeException) -> return Nothing)
  where readPipe = do (token, byteCount) <- fdRead fd 1
                      assert_ $ countToInt byteCount == 1
                      return $ Just $ Token $ token
-- Return a token to the pipe:
-- (writes the token's bytes back so another job may claim the slot)
returnToken :: Fd -> Token -> IO ()
returnToken fd token = do byteCount <- fdWrite fd (unToken token)
                          assert_ $ countToInt byteCount == 1
-- Convenient assert function: check the condition, then yield unit.
assert_ :: Monad m => Bool -> m ()
assert_ cond = assert cond (pure ())
-- Conversion helper for ByteCount type (plain numeric widening).
countToInt :: ByteCount -> Int
countToInt = fromIntegral
-- | Split a string on a delimiter character.  Delimiters are dropped;
-- adjacent/leading/trailing delimiters yield empty fields, and the
-- empty string yields a single empty field.
splitBy :: Char -> String -> [String]
splitBy delimiter = go
  where
    -- The previous foldr formulation carried an unreachable
    -- 'f _ [] = []' clause; 'break' makes field boundaries explicit.
    go s = case break (== delimiter) s of
             (field, [])       -> [field]
             (field, _ : rest) -> field : go rest
-- Main function:

-- | Demo driver: exercises both the list-based 'runJobs' API and the
-- handle-threading 'runJob'/'waitOnJobs' API on example jobs.
main :: IO ()
main = do handle <- initializeJobServer 1
          printJobServerHandle handle
          returns <- runJobs handle jobs
          putStrLn $ "returns: " ++ show returns
          putStrLn "--------------------------------------------------------------------"
          handle2 <- getJobServer
          printJobServerHandle handle2
          handle3 <- mapM' runJob handle2 jobs
          printJobServerHandle handle3
          returns2 <- waitOnJobs handle3
          putStrLn $ "returns: " ++ show returns2
          clearJobServer handle
  where jobs = [exampleLongJob "A", exampleJob "B", exampleLongJob "C",
                exampleJob "D", exampleJob "E", exampleJob "F",
                exampleJob "G", exampleJob "H", exampleJob "I",
                exampleJob "J", exampleJob "K", exampleJob "L"]
-- | Monadic left fold: thread an accumulator through a list of
-- monadic values, combining with @f@ at each step.
mapM' :: Monad m => (a -> m b -> m a) -> a -> [m b] -> m a
mapM' _ acc [] = return acc
mapM' f acc (m:ms) = f acc m >>= \acc' -> mapM' f acc' ms
-- | Example job: prints start/finish, ~1s of simulated work, returns 1.
exampleJob :: String -> IO (Int)
exampleJob n = do putStrLn $ ".... Running job: " ++ n
                  threadDelay 1000000
                  putStrLn $ ".... Finishing job: " ++ n
                  return 1

-- | Example job: prints start/finish, ~10s of simulated work, returns 2.
exampleLongJob :: String -> IO (Int)
exampleLongJob n = do putStrLn $ ".... Running job: " ++ n
                      threadDelay 10000000
                      putStrLn $ ".... Finishing job: " ++ n
                      return 2
| dinkelk/job-server | JobServer.hs | mit | 7,575 | 0 | 13 | 2,015 | 2,145 | 1,076 | 1,069 | 129 | 2 |
module ProgramOptions (
options, Options(..)
) where
import Data.Text.Read
import Options.Applicative
-- | Parsed command-line options.
data Options = Options
  { word :: String -- ^ the word to print the inits of
  , seed :: Integer -- ^ seed for the random function
  , bytes :: Maybe String } -- ^ optional limit on the output size
-- | Command-line parser: @--version@ yields 'Nothing'; otherwise the
-- regular options are parsed into 'Just' an 'Options' record.
options :: Parser (Maybe Options)
options = flag' Nothing (long "version" <> hidden) <|> (Just <$> options')
  where
    options' = Options
      <$> strOption
          ( long "word"
         <> short 'w'
         <> metavar "WORD"
         <> help "The word to print the inits of" )
      <*> option auto
          ( long "seed"
         <> short 's'
         <> metavar "SEED"
         <> help "The seed for the random function" )
      <*> optional (strOption
          ( long "bytes"
         <> short 'b'
         <> metavar "BYTES"
         <> help "Limit the output to BYTES" ))
| sclausen/haskell-weighted-inits | src/ProgramOptions.hs | mit | 805 | 0 | 15 | 279 | 219 | 112 | 107 | 26 | 1 |
module Mish.Util where
import System.Random
-- | Pick a uniformly random element from a non-empty list, returning
-- the element and the advanced generator.  Partial: errors on [].
choice :: [a] -> StdGen -> (a, StdGen)
choice [] _ = error "invalid list for choice"
choice l g = (l !! index, g')
  where (index, g') = randomR (0, length l - 1) g
| halvorgb/mish | src/Mish/Util.hs | mit | 212 | 0 | 10 | 46 | 100 | 55 | 45 | 6 | 1 |
-- |
-- Module : $Header$
-- Description : Ohua namespaces
-- Copyright : (c) Justus Adam 2018. All Rights Reserved.
-- License : EPL-1.0
-- Maintainer : [email protected], [email protected]
-- Stability : experimental
-- Portability : portable
-- This source code is licensed under the terms described in the associated LICENSE.TXT file
--
--
{-# LANGUAGE DeriveLift, TemplateHaskell #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Ohua.Frontend.NS
( FunAnn(..)
, Imports
, Namespace
, emptyNamespace
, HasName(name)
, algoImports
, sfImports
, pragmas
, HasDecls(decls)
, Pragma(..)
, parsePragma
, Feature
) where
import Ohua.Prelude
import qualified Data.HashMap.Strict as HM
import qualified Language.Haskell.TH as TH (Exp(VarE, AppE))
import qualified Language.Haskell.TH.Syntax as TH (Lift, lift)
import Control.Category ((>>>))
import qualified Data.Text as T
import qualified Data.Char as C
-- | A function type annotation: argument types plus return type.
data FunAnn tyExpr = FunAnn
    { argTypes :: [tyExpr] -- ^ one entry per argument
    , retType :: tyExpr -- ^ result type
    } deriving (Show, Eq, Ord, Functor, Foldable, Traversable, Generic, TH.Lift)

-- | Imported namespaces paired with the bindings taken from each.
type Imports = [(NSRef, [Binding])]

-- | Name of a feature enabled through a pragma.
type Feature = Text
-- | A pragma from a namespace header: either a recognized feature
-- toggle or an uninterpreted name/payload pair.
data Pragma
    = Feature Feature
    | Other Text Text
    deriving (Generic, Show, Eq, Ord, TH.Lift)

-- | The character that introduces a pragma.
-- NOTE(review): not referenced by 'parsePragma' in this module chunk;
-- confirm it is still used elsewhere before removing.
pragmaChar :: Char
pragmaChar = '#'
-- | Parse a pragma body of the form @name rest@ (leading/trailing
-- whitespace stripped).  "feature" maps to 'Feature'; any other
-- non-empty name is kept as 'Other'; an empty name is an error.
parsePragma :: MonadError Text m => Text -> m Pragma
parsePragma =
    T.strip >>>
    T.break C.isSpace >>> \case
        (pname, T.stripStart -> t) ->
            case pname of
                "feature" -> pure $ Feature t
                _
                    | null pname ->
                        throwError $
                        "Pragma name should not be empty " <> show (pname, t)
                    | otherwise -> pure $ Other pname t
-- Orphan 'Lift' instance for strict HashMaps (the module disables the
-- orphan warning above); lifts through the association list and
-- rebuilds with 'HM.fromList' at splice time.
instance (TH.Lift k, TH.Lift v) => TH.Lift (HM.HashMap k v) where
    lift m = do
        listE <- TH.lift (HM.toList m)
        pure $ TH.VarE 'HM.fromList `TH.AppE` listE
-- | A namespace as defined by the ohua API. It has a name, a list of
-- dependencies and aliasings and some defined expressions (currently
-- constrained to lambdas/algos)
--
-- Because the layout of the namespace itself is considered unstable use lenses
-- instead to interact with namespaces. To create new namespaces first create an
-- empty one with 'emptyNamespace' and then populate it using lenses.
--
-- Note that the "Eq" instance here is mainly meant for testing purposes and
-- should otherwise not be relied upon.
data Namespace decl =
    Namespace NSRef -- name
              [Pragma] -- pragmas
              Imports -- algo imports
              Imports -- sf imports
              (HM.HashMap Binding decl) -- declarations
    deriving (Generic, Show, Eq, TH.Lift)

-- | A namespace with the given name and no pragmas, imports, or
-- declarations; populate it through the lenses below.
emptyNamespace :: NSRef -> Namespace decl
emptyNamespace name0 = Namespace name0 [] [] [] mempty
-- | Lens onto the namespace's name.
instance HasName (Namespace decls) NSRef where
    name f (Namespace a b c d e) = (\a' -> Namespace a' b c d e) <$> f a

-- | Lens onto the pragma list.
pragmas :: Lens' (Namespace decls) [Pragma]
pragmas f (Namespace a b c d e) = (\b' -> Namespace a b' c d e) <$> f b

-- | Lens onto the algorithm imports.
algoImports :: Lens' (Namespace decls) Imports
algoImports f (Namespace a b c d e) = (\c' -> Namespace a b c' d e) <$> f c

-- | Lens onto the stateful-function imports.
sfImports :: Lens' (Namespace decls) Imports
sfImports f (Namespace a b c d e) = (\d' -> Namespace a b c d' e) <$> f d

-- | (Type-changing) lens onto the declaration map.
instance HasDecls (Namespace decls) (Namespace decls') (HM.HashMap Binding decls) (HM.HashMap Binding decls') where
    decls f (Namespace a b c d e) = Namespace a b c d <$> f e
| ohua-dev/ohua-core | core/src/Ohua/Frontend/NS.hs | epl-1.0 | 3,484 | 0 | 15 | 864 | 967 | 536 | 431 | -1 | -1 |
module Main where
import System (getArgs)
import IO
import List (isSuffixOf)
import Maybe (fromJust)
import Text.XML.HaXml.Types (Document(..),Content(..))
import Text.XML.HaXml.Parse (xmlParse,dtdParse)
import Text.XML.HaXml.Validate (validate)
import Text.XML.HaXml.Wrappers (fix2Args)
-- This is a fairly trivial application that reads a DTD from a file,
-- an XML document from another file (or stdin), and writes any validation
-- errors to stdout.
-- Read the DTD from a file, the XML document from a file (or stdin),
-- and print any validation errors, one per line.
main = do
  (dtdf,xmlf) <- fix2Args
  dtdtext <- ( if dtdf=="-" then error "Usage: validate dtdfile [xmlfile]"
               else readFile dtdf )
  content <- ( if xmlf=="-" then getContents else readFile xmlf )
  -- Replaces the partial 'fromJust': a DTD that fails to parse now
  -- dies with a diagnostic instead of "fromJust: Nothing".
  case dtdParse dtdf dtdtext of
    Nothing -> error ("validate: could not parse DTD in " ++ dtdf)
    Just dtd ->
      let Document _ _ xml = xmlParse xmlf content
          errs = validate dtd xml
      in mapM_ putStrLn errs
| jgoerzen/dtmconv | HaXml-1.12/src/tools/Validate.hs | gpl-2.0 | 843 | 0 | 12 | 181 | 223 | 127 | 96 | 18 | 3 |
module Interpolacion where
import GramaticaAbstracta
import Semantica
import SistemasEcuaciones
import FuncionesAuxiliaresSE
{-METODO DE NEWTON CON DIFERENCIAS DIVIDIDAS
NOTA: Este metodo tiene dos partes: Una que evalua un punto nuevo en el polinomio y da el resultado de la interpolacion y otra que
entrega el polinomio interpolante escrito por medio de nuestra gramatica de funciones. Esta basado en la implementacion hecha por
Santiago Rodriguez y Carolina Campillo en la practica del semestre 2010-1.
-}
-- | Newton's interpolating polynomial, as a 'Func', for the sample
-- points x with values fx (via divided differences).
newtonPolinomio :: [Double] -> [Double] -> Func
newtonPolinomio x fx = makePolN (coefB fx x n [] 0) x
    where n = length fx -1
-- | Interpolate the value v using Newton's polynomial: substitute the
-- variable 'x' by the constant v and evaluate.
newtonEvaluado :: [Double] -> [Double] -> Double -> Func
newtonEvaluado x fx v = eval (newtonPolinomio x fx) ('x', FConst v)
-- | Newton divided-difference coefficients b0..bn for sample points x
-- with values fx; @acc@ accumulates results and @i@ is the next index
-- (callers start with [] and 0).  Classic recurrence:
-- f[xj..xk] = (f[xj..x(k-1)] - f[x(j+1)..xk]) / (xj - xk).
coefB ::[Double]->[Double]->Int->[Double]->Int->[Double]
coefB fx x n acc i
    | i <= n    = coefB fx x n (acc ++ [dd 0 i]) (i + 1)
    | otherwise = acc
    where
      dd j k
        | j == k    = fx !! k
        | otherwise = (dd j (k - 1) - dd (j + 1) k) / (x !! j - x !! k)
-- | Wrap each coefficient as a parenthesised constant 'Func'.
makeCons :: [Double] -> [Func]
makeCons = map (\b -> FPar (FConst b))

-- | Turn each node x_i into the symbolic factor (x - x_i).
makeRes :: [Double] -> [Func]
makeRes = map (\xi -> FPar (FRes (FVar 'x') (FConst xi)))
-- | Pair each coefficient b_i with the product of its (x - x_j)
-- factors, producing terms b_i*(x-x_0)*...*(x-x_(i-1)) for i=1..n-1.
-- NOTE(review): the (i == n) case falls through to [] and the first
-- clause only matches empty inputs with i == 0 -- confirm callers
-- always pass i = n-1 as done in makePolN.
makeMultN :: [Func] -> [Func] -> Int -> Int -> [Func]
makeMultN [] [] 0 _ = []
makeMultN b x i n
    | (i == 1) = [FMult (b!!i) (x!!(i-1))]
    | (i < n) = ant ++ [FMult (b!!i) (foldl1 (FMult) (factors x i n))]
    | (otherwise) = []
    where ant = makeMultN b x (i-1) n
-- | Collect the factors (x - x_0)..(x - x_(i-1)) multiplying b_i.
-- Partial: errors ("Fuera de rango" = out of range) when i >= n.
factors :: [Func] -> Int -> Int -> [Func]
factors x i n
    | (i == 1) = [x!!(i-1)]
    | (i < n) = ant ++ [x!!(i-1)]
    | (otherwise) = error "Fuera de rango"
    where ant = factors x (i-1) n
-- | Fold a list of terms into one (left-associated) sum.
makeSum :: [Func] -> Func
makeSum t = foldl1 (FSum) t
-- | Assemble Newton's polynomial: b0 plus the higher-order terms.
makePolN :: [Double] -> [Double] -> Func
makePolN b x = FSum ((makeCons b)!!0) (makeSum (makeMultN (makeCons b) (makeRes x) (length x -1) (length x)))
{-METODO DE LAGRANGE
NOTA: Este metodo tiene dos partes: Una que evalua un punto nuevo en el polinomio y da el resultado de la interpolacion y otra que
entrega el polinomio interpolante escrito por medio de nuestra gramatica de funciones. Esta basado en la implementacion hecha por
Santiago Rodriguez y Carolina Campillo en la practica del semestre 2010-1.
-}
-- | Lagrange interpolating polynomial for points x with values fx.
lagrangePolinomio :: [Double] -> [Double] -> Func
lagrangePolinomio x fx = makePolL (makeMultL (coefL x n 0 []) (makeCons fx) n n)
    where n = length x -1
-- | Interpolate the value v using the Lagrange polynomial.
lagrangeEvaluado :: [Double] -> [Double] -> Double -> Func
lagrangeEvaluado x fx v = eval (lagrangePolinomio x fx) ('x',FConst v)
-- | Lagrange basis functions L_i, built as quotients: the numerator
-- keeps the factors (x - x_j), j /= i, symbolic; the denominator is
-- the numeric product of (x_i - x_j), pre-simplified by 'reduccion'.
coefL :: [Double] -> Int -> Int -> [Func] -> [Func]
coefL x n i l
  | i<=n = coefL x n (i+1) (l++[FPar (FDiv (prodArriba i 0 []) (prodAbajo i 0 []))])
  | otherwise = l
  -- prodArriba: symbolic numerator; prodAbajo: numeric denominator
  where prodArriba i j li
          | j==i = prodArriba i (j+1) li
          | j<=n = prodArriba i (j+1) (li++[FPar (FRes (FVar 'x') (FConst(x!!j)))])
          | otherwise = foldl1 (FMult) li
        prodAbajo i j li
          | j == i = prodAbajo i (j+1) li
          | j <= n = prodAbajo i (j+1) (li++[FConst ((x!!i)-(x!!j))])
          | otherwise = reduccion (foldl1 (FMult) li)
-- | Pair each basis function L_i with its sample value f(x_i),
-- producing the products L_i(x)*f(x_i) for i = 0..n.
-- NOTE(review): no 'otherwise' guard -- a call with i > n would be a
-- pattern-match failure; confirm callers always start with i == n.
makeMultL :: [Func] -> [Func] -> Int -> Int -> [Func]
makeMultL [] [] _ _ = []
makeMultL l fx i n
    | (i == 0) = [FMult (l!!0) (fx!!0)]
    | (i <= n) = ant ++ [FMult (l!!i) (fx!!i)]
    where ant = makeMultL l fx (i-1) n
-- | Sum the products L_i(x)*f(x_i) into the Lagrange polynomial.
makePolL :: [Func] -> Func
makePolL t = makeSum t
{-SPLINES LINEALES
NOTA: Este metodo tiene dos partes: Una que evalua un punto nuevo en el polinomio y da el resultado de la interpolacion y otra que
entrega el polinomio interpolante escrito por medio de nuestra gramatica de funciones. Esta basado en la implementacion hecha por
Santiago Rodriguez y Carolina Campillo en la practica del semestre 2010-1.
-}
-- | Linear splines: one textual equation per segment of the samples.
spLinEcuaciones :: [Double] -> [Double] -> [String]
spLinEcuaciones x fx = ecsLin x fx n
    where n = length x -1
-- | Interpolate v with linear splines: locate its segment, then
-- evaluate that segment's linear equation at v.
spLinEvaluado :: [Double] -> [Double] -> Double -> Func
spLinEvaluado x fx v = eval (regLin x fx (srchValue x v)) ('x',FConst v)
-- | List the linear equations for segments 1..i together with the
-- x-interval each one is valid on.
-- NOTE(review): "para" ("for") is concatenated directly after the
-- shown equation -- likely a missing space; also no 'otherwise' guard
-- for i >= n.  Confirm before changing the output format.
ecsLin :: [Double] -> [Double] -> Int -> [String]
ecsLin x fx i
    | (i == 1) = [(show (regLin x fx i)) ++ "para " ++ show (x!!(i-1)) ++ " <= x <= " ++ show (x!!i)]
    | (i < n) = ant ++ [(show (regLin x fx i)) ++ "para " ++ show (x!!(i-1)) ++ " <= x <= " ++ show (x!!i)]
    where n = length x
          ant = ecsLin x fx (i-1)
-- | Linear equation for segment i in point-slope form:
-- f(x_i) + m*(x - x_i), where m is the slope over that segment.
regLin :: [Double] -> [Double] -> Int -> Func
regLin x fx i = FSum (FConst (fst(ifx))) (FMult (FPar (FConst m)) (FPar (FRes (FVar 'x') (FConst (fst(ix))))))
    where m = (fst(ifx) - snd(ifx))/(fst(ix)-snd(ix))
          ix = fst(par)
          ifx = (fx!!(snd(par)),fx!!(snd(par) +1))
          par = srchIndex x i
-- | Endpoints ((x_left, x_right)) and base index of the segment that
-- contains sample x!!i, obtained by splitting the list at x!!i.
srchIndex :: [Double] -> Int -> ((Double,Double),Int)
srchIndex x i
    | n1 /= 0 = ((last(fst(mitd)),head(snd(mitd))),n)
    | otherwise = ((head(snd(mitd)),(snd(mitd)!!1)),n)
    where mitd = break (>=(x!!i)) x
          n = if(n1==0) then (n1) else (n1-1)
          n1 = length(fst(mitd))
-- | Index of the segment a value v belongs to: the count of sample
-- points strictly below v, bumped to 1 when v precedes all of them.
srchValue :: [Double] -> Double -> Int
srchValue xs v
    | below == 0 = 1
    | otherwise  = below
    where below = length (takeWhile (< v) xs)
{-FUNCIONES GENERALES DE SPLINES CUADRATICOS Y CUBICOS
-}
-- | Right-hand sides of the spline matching equations: each interior
-- sample value fx_i (i .. n-1) appears twice, appended to @acc@.
empalB :: [Double] -> [Double] -> Int -> Int -> [Double]
empalB fx acc i n = acc ++ concatMap twice [i .. n - 1]
    where twice j = [fx !! j, fx !! j]
-- | Right-hand sides of the two endpoint equations: f(x_0) and f(x_n).
extB :: [Double] -> Int -> [Double]
extB fx n = [fx !! 0, fx !! n]
{-SPLINES CUADRATICOS
NOTA: Este metodo tiene dos partes: Una que evalua un punto nuevo en el polinomio y da el resultado de la interpolacion y otra que
entrega el polinomio interpolante escrito por medio de nuestra gramatica de funciones. Esta basado en la implementacion hecha por
Santiago Rodriguez y Carolina Campillo en la practica del semestre 2010-1.
-}
-- | Quadratic-spline coefficients: solve the 3(n-1)... actually
-- 3*(n) x 3*(n) system (n = segments) by Gaussian elimination with
-- partial pivoting; results come back as (index, value) pairs.
coefSpCuad :: [Double] -> [Double] -> [(Integer,Double)]
coefSpCuad x fx = eGaussPParcial (matrizSpCuad x fx) (fromIntegral (length x -1)*3)
-- | Augmented matrix of the quadratic-spline equation system.
matrizSpCuad :: [Double] -> [Double] -> Matriz
matrizSpCuad x fx = leerMatrizAu (fromIntegral (length x -1)* 3) (sistmEcSpCuad x fx)
-- | All rows of the system, flattened: matching equations, endpoint
-- equations, derivative continuity, and the free second derivative.
sistmEcSpCuad :: [Double] -> [Double] -> [Double]
sistmEcSpCuad x fx = (empalCuad x fx [] 1 n neq)++(extCuad x fx neq n)++(derCuad x n neq [] 1)++(sdaDerCuad neq)
    where n = length x -1
          neq = (n)*3
-- | Joint (matching) equations of the quadratic spline: each interior knot
-- @x!!i@ must be interpolated by both adjacent parabolas, producing two rows
-- per interior knot.  Rows are emitted as one flat list, zero-padded so that
-- the downstream matrix reader can slice rows of length neq+1
-- (coefficients ++ right-hand side).  Based on the referenced course practice.
-- NOTE(review): the literal @[0,00..]@ is just @[0,0..]@ (00 == 0).
empalCuad :: [Double] -> [Double] -> [Double] -> Int -> Int -> Int -> [Double]
empalCuad x fx l i n neq
    | i<=(n-1) = empalCuad x fx (l++(take ((i-1)*3) [0,0..])++[(x!!i)^2,x!!i,1]++(take (neq-3*i) [0,0..])++[b1]++(take (3*i) [0,00..])++[(x!!i)^2,x!!i,1]++(take (neq -3-(i)*3) [0,0..])++[b2]) (i+1) n neq
    | otherwise = l
    where b = empalB fx [] 1 n
          -- The first pair of rows reads b!!0 and b!!1; subsequent pairs are
          -- shifted by one position in the independent-term list.
          b1 = if (i==1) then (b!!(i-1)) else (b!!i)
          b2 = if (i==1) then (b!!i) else (b!!(i+1))
-- | Endpoint equations: the first parabola must pass through (x!!0, fx!!0)
-- and the last one through (x!!n, fx!!n).  Both rows are emitted in one flat
-- list with zero padding.  Based on the referenced course practice.
extCuad :: [Double] -> [Double] -> Int -> Int -> [Double]
extCuad x fx neq n = ([(x!!0)^2,x!!0,1]++(take zeros [0,0..])++[b!!0]++(take zeros [0,0..])++[(x!!n)^2,x!!n,1]++[b!!1])
    where zeros = div (neq*2-6) 2  -- padding so each row spans neq coefficients
          b = extB fx n
-- | First-derivative continuity at the interior knots: for each interior
-- knot x!!i the derivatives (2ax + b) of the two neighbouring parabolas are
-- equated, yielding one zero-padded row (right-hand side 0) per knot,
-- appended to the accumulator.
derCuad :: [Double] -> Int -> Int -> [Double] -> Int -> [Double]
derCuad x n neq acc i
  | i > n - 1 = acc
  | otherwise = derCuad x n neq (acc ++ row) (i + 1)
  where
    xi  = x !! i
    row = replicate ((i - 1) * 3) 0
          ++ [2 * xi, 1, 0, -2 * xi, -1, 0]
          ++ replicate (neq - 3 - i * 3 + 1) 0
-- | Closure equation for the quadratic spline: the second derivative of the
-- first parabola is forced to zero, i.e. the row [1, 0, 0, ..., 0 | 0].
sdaDerCuad :: Int -> [Double]
sdaDerCuad neq = 1 : replicate neq 0
{-SPLINES CUBICOS
NOTA: Este metodo tiene dos partes: Una que evalua un punto nuevo en el polinomio y da el resultado de la interpolacion y otra que
entrega el polinomio interpolante escrito por medio de nuestra gramatica de funciones. Esta basado en la implementacion hecha por
Santiago Rodriguez y Carolina Campillo en la practica del semestre 2010-1.
-}
-- | Solve for the cubic-spline coefficients: Gaussian elimination with
-- partial pivoting over the 4*(n-1) unknowns (a,b,c,d per segment).
coefSpCub :: [Double] -> [Double] -> [(Integer,Double)]
coefSpCub x fx = eGaussPParcial (matrizSpCub x fx) (fromIntegral (length x -1)*4)
-- | Build the augmented matrix of the cubic-spline linear system.
matrizSpCub :: [Double] -> [Double] -> Matriz
matrizSpCub x fx = leerMatrizAu (fromIntegral (length x -1)* 4) (sistmEcSpCub x fx)
-- | Concatenate every equation of the cubic system into a single flat list:
-- joints, endpoints, first- and second-derivative continuity, and the
-- natural boundary conditions.  Based on the referenced course practice.
sistmEcSpCub :: [Double] -> [Double] -> [Double]
sistmEcSpCub x fx = (empalCub x fx [] 1 n neq)++(extCub x fx neq n)++(derCub x n neq [] 1)++(sdaDerCub x n neq [] 1)++(sdaDerExtCub x neq n)
    where n = length x -1   -- number of spline segments
          neq = (n)*4       -- unknowns: 4 coefficients per segment
-- | Joint (matching) equations of the cubic spline: each interior knot
-- @x!!i@ must be interpolated by both adjacent cubics, producing two
-- zero-padded rows per interior knot, emitted as one flat list
-- (coefficients ++ right-hand side per row).  Based on the referenced
-- course practice.
empalCub :: [Double] -> [Double] -> [Double] -> Int -> Int -> Int -> [Double]
empalCub x fx l i n neq
    | i<=(n-1) = empalCub x fx (l++(take ((i-1)*4) [0,0..])++[(x!!i)^3,(x!!i)^2,x!!i,1]++(take (neq-4*i) [0,0..])++[b1]++(take (4*i) [0,0..])++[(x!!i)^3,(x!!i)^2,x!!i,1]++(take (neq -4-(i)*4) [0,0..])++[b2]) (i+1) n neq
    | otherwise = l
    where b = empalB fx [] 1 n
          -- The first pair of rows reads b!!0 and b!!1; subsequent pairs are
          -- shifted by one position in the independent-term list.
          b1 = if (i==1) then (b!!(i-1)) else (b!!i)
          b2 = if (i==1) then (b!!i) else (b!!(i+1))
-- | Endpoint equations: the first cubic must pass through (x!!0, fx!!0) and
-- the last one through (x!!n, fx!!n).  Both rows are emitted in one flat
-- list with zero padding.  Based on the referenced course practice.
extCub :: [Double] -> [Double] -> Int -> Int -> [Double]
extCub x fx neq n = ([(x!!0)^3,(x!!0)^2,x!!0,1]++(take zeros [0,0..])++[b!!0]++(take zeros [0,0..])++[(x!!n)^3,(x!!n)^2,x!!n,1]++[b!!1])
    where zeros = div (neq*2-8) 2  -- padding so each row spans neq coefficients
          b = extB fx n
-- | First-derivative continuity at the interior knots of the cubic spline:
-- one row per interior knot equating 3ax^2 + 2bx + c of the two adjacent
-- segments (right-hand side 0), appended to the accumulator.
derCub :: [Double] -> Int -> Int -> [Double] -> Int -> [Double]
derCub x n neq acc i
  | i > n - 1 = acc
  | otherwise = derCub x n neq (acc ++ row) (i + 1)
  where
    xi  = x !! i
    row = replicate ((i - 1) * 4) 0
          ++ [3 * xi ^ 2, 2 * xi, 1, 0, -3 * xi ^ 2, -2 * xi, -1, 0]
          ++ replicate (neq - 4 - i * 4 + 1) 0
-- | Second-derivative continuity at the interior knots: one row per interior
-- knot equating 6ax + 2b of the two adjacent segments (right-hand side 0),
-- appended to the accumulator.
sdaDerCub :: [Double] -> Int -> Int -> [Double] -> Int -> [Double]
sdaDerCub x n neq acc i
  | i > n - 1 = acc
  | otherwise = sdaDerCub x n neq (acc ++ row) (i + 1)
  where
    xi  = x !! i
    row = replicate ((i - 1) * 4) 0
          ++ [6 * xi, 2, 0, 0, -6 * xi, -2, 0, 0]
          ++ replicate (neq - 4 - i * 4 + 1) 0
-- | Natural boundary conditions: the second derivative of the spline is set
-- to zero at both extreme knots, producing the final two rows of the system.
sdaDerExtCub :: [Double] -> Int -> Int -> [Double]
sdaDerExtCub x neq n =
  [6 * head x, 2, 0, 0]
    ++ replicate (neq * 2 - 8 + 1) 0
    ++ [6 * (x !! n), 2, 0, 0, 0]
-- Sample knot/ordinate vectors for manually exercising the spline routines.
x = [2.3,2.4,2.5,2.6]
fx = [17.997784,15.850776,14.140572,12.720153]
x2 :: [Double]
x2 = [-1,1,2,4]
fx2 :: [Double]
fx2 = [4,1,3,-2]
x3 = [3.2,3.4,3.6,3.8]
fx3 = [-6.421269,-7.88968,-9.411188,-10.990671]
x4 :: [Double]
x4 = [1,3,4,5]
fx4 = [3,1,3.5,2]
fx4 = [3,1,3.5,2] | dmuneras/LambdaMethods | src/Interpolacion.hs | gpl-2.0 | 14,045 | 0 | 21 | 3,226 | 5,464 | 2,962 | 2,502 | 164 | 3 |
{-# LANGUAGE MultiParamTypeClasses #-}
module NFA.Compress.Inter where
import NFA.Compress.Compressed
import NFA.Compress.Instance
import NFA.Compress.Data
import NFA.Compress.Look
import NFA.Compress.Roll
import NFA.Compress.Config
import qualified Fun.Table as F
import Autolib.Reporter
import Autolib.ToDoc
import qualified Challenger as C
import Inter.Types
import Inter.Quiz
import Autolib.Size
import Data.Typeable
import Data.Array (array)
-- | Scoring for this task: smaller solutions (shorter next/check arrays)
-- rank better, hence an increasing order.
instance OrderScore DFA_Compress where
    scoringOrder _ = Increasing
-- | Wires the table-compression task into the challenge framework: task
-- statement rendering, the initial example answer, and the partial/total
-- verification steps.  (User-facing strings are German by design.)
instance C.Partial DFA_Compress Instance Compressed where
    describe p i = vcat
        [ text "Gesucht ist eine komprimierte Darstellung der Automatentabelle"
        , nest 4 $ F.prettyTafel2 $ tafel $ original i
        , text "Das next/check-Array soll höchstens"
        , text "die Länge" <+> toDoc ( max_size i ) <+> text "haben."
        ]
    initial p i = NFA.Compress.Compressed.example
    -- Verify every (state, symbol) entry of the original table against the
    -- compressed representation; reporting is silent unless a lookup fails.
    partial p i b = do
        C.verify p b
        sequence_ $ do
            (x, zs) <- zip [ 0 .. ] $ original i
            (y, z ) <- zip [ 0 .. ] zs
            return $ silent $ do
                inform $ hsep [ text "lookup" , toDoc (x,y), text "=?=", toDoc z ]
                t <- NFA.Compress.Look.up b x y
                assert ( t == z ) $ text "Wert ist korrekt?"
        inform $ text "alle Werte sind korrekt."
    -- A total solution must additionally respect the size bound.
    total p i b = do
        assert ( size b <= max_size i )
            $ text "Lösung ist klein genug?"
-- | Convert a row-major list-of-lists transition table into the
-- two-dimensional 'F.Tafel2' array representation (0-based indices).
-- NOTE(review): 'head zss' assumes the table has at least one row.
tafel zss =
    let r = fromIntegral $ length zss
        c = fromIntegral $ length $ head zss
    in  F.Tafel2 $ array ((0,0), (r-1,c-1)) $ do
        ( x, zs ) <- zip [0..] zss
        ( y, z ) <- zip [ 0..] zs
        return ((x,y), fromIntegral z)
-- | Fixed-instance task: serves the bundled example instance verbatim.
make_fixed :: Make
make_fixed = direct DFA_Compress NFA.Compress.Instance.example
-- | Quiz variant: instances are rolled randomly from the configuration.
instance Generator DFA_Compress Config Instance where
    generator p conf key = roll conf
-- | The generated instance is used as-is (no projection step).
instance Project DFA_Compress Instance Instance where
    project p = id
make_quiz :: Make
make_quiz = quiz DFA_Compress NFA.Compress.Config.example
| Erdwolf/autotool-bonn | src/NFA/Compress/Inter.hs | gpl-2.0 | 2,023 | 11 | 18 | 519 | 666 | 349 | 317 | 54 | 1 |
{-# LANGUAGE NoMonomorphismRestriction #-}
{-# LANGUAGE OverloadedStrings #-}
module MusicBrainz.API.Recording where
import Control.Applicative
import Control.Lens
import Data.Text (Text)
import Text.Digestive
import qualified Data.Map as Map
import qualified Data.Set as Set
import MusicBrainz hiding (coreRef)
import MusicBrainz.API
import qualified MusicBrainz.API.Common as Common
import MusicBrainz.API.JSON
import qualified MusicBrainz.Data as MB
import qualified MusicBrainz.Data.Recording as MB
--------------------------------------------------------------------------------
-- | Digestive-functors form decoding a full recording tree from a request:
-- the core recording fields plus its relationships, annotation, and the
-- sets of ISRCs and PUIDs.
tree :: Form Text MusicBrainz (Tree Recording)
tree = RecordingTree <$> "recording" .: recording
                     <*> relationships
                     <*> annotation
                     <*> setOf isrcF
                     <*> setOf puidF
  where
    recording = Recording <$> name
                          <*> comment
                          <*> artistCreditRef
                          <*> duration
    -- PUIDs arrive as free-form strings; reject anything the 'puid' prism
    -- cannot parse.
    puidF = validate (maybe (Error "Could not parse PUID") Success . preview puid) $ string Nothing
--------------------------------------------------------------------------------
-- | Create a new recording from a submitted 'tree'.
create :: Form Text MusicBrainz (RefObject (Revision Recording))
create = Common.create tree
--------------------------------------------------------------------------------
-- | All recordings at their latest revisions, keyed by recording reference.
findLatest :: Form Text MusicBrainz (Map.Map (Ref Recording) (CoreEntity Recording))
findLatest = Common.findLatest
--------------------------------------------------------------------------------
-- | View a recording as it was at the requested revision.
viewRevision :: Form Text MusicBrainz (CoreEntity Recording)
viewRevision = runApi $
  MB.viewRevision <$> revision
--------------------------------------------------------------------------------
-- | Relationships attached to the requested recording revision.
viewRelationships :: Form Text MusicBrainz (Set.Set LinkedRelationship)
viewRelationships = Common.viewRelationships recordingRevision
--------------------------------------------------------------------------------
-- | Decode a recording-revision reference from the request.
recordingRevision :: Form Text MusicBrainz (Ref (Revision Recording))
recordingRevision = revision
--------------------------------------------------------------------------------
-- | Annotation text of the requested recording revision.
viewAnnotation :: Form Text MusicBrainz Annotation
viewAnnotation = Common.viewAnnotation recordingRevision
--------------------------------------------------------------------------------
-- | Edit a recording by submitting a replacement 'tree'.
update :: Form Text MusicBrainz (RefObject (Revision Recording))
update = Common.update tree
--------------------------------------------------------------------------------
-- | Merge one recording into another.
merge :: Form Text MusicBrainz (RefObject (Revision Recording))
merge = Common.merge
--------------------------------------------------------------------------------
-- | Fetch a single revision entity by reference.
getRevision :: Form Text MusicBrainz (Entity (Revision Recording))
getRevision = Common.getRevision
--------------------------------------------------------------------------------
-- | Tracks that use the given recording.
findRecordingTracks :: Form Text MusicBrainz [MB.RecordingUse]
findRecordingTracks = runApi $ MB.findRecordingTracks <$> "recording" .: coreRef
--------------------------------------------------------------------------------
-- | All recordings credited to the given artist.
findByArtist :: Form Text MusicBrainz [CoreEntity Recording]
findByArtist = runApi $ MB.findByArtist <$> "artist" .: coreRef
--------------------------------------------------------------------------------
-- | Recordings carrying the given ISRC.
findByIsrc :: Form Text MusicBrainz [CoreEntity Recording]
findByIsrc = runApi $ MB.findByIsrc <$> "isrc" .: isrcF
--------------------------------------------------------------------------------
-- | ISRCs attached to the requested recording revision.
viewIsrcs :: Form Text MusicBrainz (Set.Set ISRC)
viewIsrcs = runApi $ MB.viewIsrcs <$> recordingRevision
--------------------------------------------------------------------------------
-- | Parse an ISRC from request text, rejecting values the 'isrc' prism
-- cannot parse.
isrcF :: Monad m => Form Text m ISRC
isrcF = validate (maybe (Error "Could not parse ISRC") Success . preview isrc) $
        text Nothing
| metabrainz/musicbrainz-data-service | src/MusicBrainz/API/Recording.hs | gpl-2.0 | 3,900 | 0 | 13 | 589 | 708 | 383 | 325 | 56 | 1 |
{-# LANGUAGE Arrows #-}
{-# LANGUAGE MultiWayIf #-}
-- | This module defines the time transformation functions.
module Game.Time where
-- External imports
import FRP.Yampa
import FRP.Yampa.Extra
-- Internal iports
import Game.Input
import Game.ObjectSF
-- | Time transformation that allows time to be reversed: while the reverse
-- control is held, time deltas are negated so simulation time runs
-- backwards; otherwise deltas pass through unchanged.
timeProgressionReverse :: SF Controller (DTime -> DTime)
timeProgressionReverse = proc (c) -> do
  -- NOTE: Another option is slowDown
  let rev = if controllerReverse c then ((-1)*) else id
  returnA -< rev
-- | Time transformation that slows down time upon request: while the
-- reverse control is held and the power reserve is positive, deltas are
-- scaled by 0.1.  The reserve drains while slowing (unit -1), recharges
-- otherwise (unit +1), and is clamped to [0, maxPower].
timeProgressionSlowDown :: SF Controller (DTime -> DTime)
timeProgressionSlowDown = proc (c) -> do
  rec let slow = controllerReverse c
          unit = if | power' >= 0 && slow -> (-1)
                    | power' >= maxPower -> 0
                    | otherwise -> 1
      -- Integrate the drain/recharge rate, starting from a full reserve.
      power <- (maxPower +) ^<< integral -< unit
      let power' = min maxPower (max 0 power)
          dtF = if slow && (power' > 0) then (0.1*) else id
  returnA -< dtF
 where
   maxPower :: Double
   maxPower = 5
-- | Time transformation that can halt time for an object: passes deltas
-- through until the halt control fires, then freezes time (delta 0) for 25
-- seconds, after which the whole behaviour restarts via recursion.
timeProgressionHalt :: SF ObjectInput (DTime -> DTime)
timeProgressionHalt = constant id &&& mustHalt
                  ||> constant (const 0) &&& after 25 ()
                  ||> timeProgressionHalt
 where
   -- Edge detector on the halt button of the object's user input.
   mustHalt = (controllerHalt . userInput) ^>> edge
| keera-studios/pang-a-lambda | src/Game/Time.hs | gpl-3.0 | 1,421 | 2 | 18 | 372 | 363 | 194 | 169 | 28 | 4 |
{-# LANGUAGE OverloadedLists, QuasiQuotes #-}
module Nirum.Constructs.ServiceSpec where
import Data.String.QQ (s)
import Data.Map.Strict as Map (fromList)
import Test.Hspec.Meta
import Nirum.Constructs.Annotation
import Nirum.Constructs.Annotation.Internal
import Nirum.Constructs.Docs (toCode)
import Nirum.Constructs.Service (Method (Method), Parameter (Parameter))
import Nirum.Constructs.TypeExpression ( TypeExpression ( ListModifier
, OptionModifier
)
)
import Util (singleDocs)
-- | Unit tests pinning the 'toCode' renderings of service 'Parameter's and
-- 'Method's: every combination of docs annotation, @http@ annotation, error
-- clause, parameter list shape and parameter docs is checked against its
-- exact textual form.
spec :: Spec
spec = do
    -- Sample annotations reused across the cases below.
    let methodAnno = singleton $ Annotation "http" $ Map.fromList
            [ ("method", Text "GET")
            , ("path", Text "/ping/")
            ]
    let docsAnno = singleDocs "docs..."
    describe "Parameter" $
        specify "toCode" $ do
            toCode (Parameter "dob" "date" empty) `shouldBe` "date dob,"
            toCode (Parameter "dob" "date" docsAnno) `shouldBe`
                "date dob,\n# docs..."
    describe "Method" $
        specify "toCode" $ do
            -- No-parameter methods: annotations and error clauses.
            toCode (Method "ping" [] (Just "bool")
                           Nothing
                           empty) `shouldBe`
                "bool ping (),"
            toCode (Method "ping" [] (Just "bool")
                           Nothing
                           methodAnno) `shouldBe`
                "@http(method = \"GET\", path = \"/ping/\")\nbool ping (),"
            toCode (Method "ping" [] (Just "bool")
                           Nothing
                           docsAnno) `shouldBe`
                "bool ping (\n    # docs...\n),"
            toCode (Method "ping" [] (Just "bool")
                           (Just "ping-error")
                           empty) `shouldBe`
                "bool ping () throws ping-error,"
            toCode (Method "ping" [] (Just "bool")
                           (Just "ping-error")
                           docsAnno) `shouldBe`
                "bool ping (\n    # docs...\n) throws ping-error,"
            toCode (Method "ping" [] (Just "bool")
                           Nothing
                           methodAnno) `shouldBe`
                "@http(method = \"GET\", path = \"/ping/\")\nbool ping (),"
            toCode (Method "ping" [] (Just "bool")
                           (Just "ping-error")
                           methodAnno) `shouldBe`
                "@http(method = \"GET\", path = \"/ping/\")\n\
                \bool ping () throws ping-error,"
            -- Single-parameter methods.
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" empty]
                           (Just $ OptionModifier "user")
                           Nothing
                           empty) `shouldBe`
                "user? get-user (uuid user-id),"
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" empty]
                           (Just $ OptionModifier "user")
                           Nothing
                           docsAnno) `shouldBe`
                "user? get-user (\n    # docs...\n    uuid user-id,\n),"
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" empty]
                           (Just $ OptionModifier "user")
                           (Just "get-user-error")
                           empty) `shouldBe`
                "user? get-user (uuid user-id) throws get-user-error,"
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" empty]
                           (Just $ OptionModifier "user")
                           (Just "get-user-error")
                           docsAnno) `shouldBe` [s|
user? get-user (
    # docs...
    uuid user-id,
) throws get-user-error,|]
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" $
                                singleDocs "param docs..."]
                           (Just $ OptionModifier "user")
                           Nothing
                           empty) `shouldBe`
                "user? get-user (\n    uuid user-id,\n    # param docs...\n),"
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" $
                                singleDocs "param docs..."]
                           (Just $ OptionModifier "user")
                           Nothing
                           docsAnno) `shouldBe` [s|
user? get-user (
    # docs...
    uuid user-id,
    # param docs...
),|]
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" $
                                singleDocs "param docs..."]
                           (Just $ OptionModifier "user")
                           (Just "get-user-error")
                           empty) `shouldBe` [s|
user? get-user (
    uuid user-id,
    # param docs...
) throws get-user-error,|]
            toCode (Method "get-user"
                           [Parameter "user-id" "uuid" $
                                singleDocs "param docs..."]
                           (Just $ OptionModifier "user")
                           (Just "get-user-error")
                           docsAnno) `shouldBe` [s|
user? get-user (
    # docs...
    uuid user-id,
    # param docs...
) throws get-user-error,|]
            -- Multi-parameter methods.
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" empty
                           , Parameter "keyword" "text" empty
                           ]
                           (Just $ ListModifier "post")
                           Nothing
                           empty) `shouldBe`
                "[post] search-posts (\n    uuid blog-id,\n    text keyword,\n),"
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" empty
                           , Parameter "keyword" "text" empty
                           ]
                           (Just $ ListModifier "post")
                           Nothing
                           docsAnno) `shouldBe` [s|
[post] search-posts (
    # docs...
    uuid blog-id,
    text keyword,
),|]
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" empty
                           , Parameter "keyword" "text" empty
                           ]
                           (Just $ ListModifier "post")
                           (Just "search-posts-error")
                           empty) `shouldBe` [s|
[post] search-posts (
    uuid blog-id,
    text keyword,
) throws search-posts-error,|]
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" empty
                           , Parameter "keyword" "text" empty
                           ]
                           (Just $ ListModifier "post")
                           (Just "search-posts-error")
                           docsAnno) `shouldBe` [s|
[post] search-posts (
    # docs...
    uuid blog-id,
    text keyword,
) throws search-posts-error,|]
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" $
                                singleDocs "blog id..."
                           , Parameter "keyword" "text" $
                                singleDocs "keyword..."
                           ]
                           (Just $ ListModifier "post")
                           Nothing
                           empty) `shouldBe` [s|
[post] search-posts (
    uuid blog-id,
    # blog id...
    text keyword,
    # keyword...
),|]
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" $
                                singleDocs "blog id..."
                           , Parameter "keyword" "text" $
                                singleDocs "keyword..."
                           ]
                           (Just $ ListModifier "post")
                           Nothing
                           docsAnno) `shouldBe` [s|
[post] search-posts (
    # docs...
    uuid blog-id,
    # blog id...
    text keyword,
    # keyword...
),|]
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" $
                                singleDocs "blog id..."
                           , Parameter "keyword" "text" $
                                singleDocs "keyword..."
                           ]
                           (Just $ ListModifier "post")
                           (Just "search-posts-error")
                           empty) `shouldBe` [s|
[post] search-posts (
    uuid blog-id,
    # blog id...
    text keyword,
    # keyword...
) throws search-posts-error,|]
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" $
                                singleDocs "blog id..."
                           , Parameter "keyword" "text" $
                                singleDocs "keyword..."
                           ]
                           (Just $ ListModifier "post")
                           (Just "search-posts-error")
                           docsAnno) `shouldBe` [s|
[post] search-posts (
    # docs...
    uuid blog-id,
    # blog id...
    text keyword,
    # keyword...
) throws search-posts-error,|]
            -- Docs annotation combined with an @http@ annotation.
            toCode (Method "search-posts"
                           [ Parameter "blog-id" "uuid" $
                                singleDocs "blog id..."
                           , Parameter "keyword" "text" $
                                singleDocs "keyword..."
                           ]
                           (Just $ ListModifier "post")
                           (Just "search-posts-error")
                           (docsAnno `union` methodAnno)) `shouldBe` [s|
@http(method = "GET", path = "/ping/")
[post] search-posts (
    # docs...
    uuid blog-id,
    # blog id...
    text keyword,
    # keyword...
) throws search-posts-error,|]
| spoqa/nirum | test/Nirum/Constructs/ServiceSpec.hs | gpl-3.0 | 10,056 | 0 | 16 | 4,861 | 1,567 | 819 | 748 | 166 | 1 |
{-# LANGUAGE NamedFieldPuns #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE ImplicitParams #-}
{-# LANGUAGE ConstraintKinds #-}
{-# LANGUAGE QuasiQuotes #-}
{-# LANGUAGE Rank2Types #-}
import Control.Applicative ((<$>))
import Control.Monad (forM_, when, unless)
import Control.Monad.Reader (asks)
import Data.Default (Default(def))
import Data.Maybe (fromMaybe, isNothing)
import qualified Data.Text as Text
import qualified Data.Text.IO as Text
import Data.Time (UTCTime)
import Data.Time.Git (io_approxidate, posixToUTC)
import System.IO (stderr, hPutStrLn)
import Text.RawString.QQ (r)
import Text.Regex.Posix ((=~))
import Text.XML.Light (showTopElement)
import System.Console.GetOpt (OptDescr(Option), ArgDescr(NoArg))
import UI.Command (Application(appName, appVersion, appAuthors, appProject,
appCmds, appShortDesc, appLongDesc,
appCategories, appBugEmail,
appOptions, appProcessConfig),
App,
Command(cmdName, cmdHandler, cmdShortDesc, cmdCategory),
appMainWithOptions, defCmd, appArgs, appConfig)
import Repo (Project(Project, projectName),
runRepo, repoStateDir, repoSnapshotsDir,
Project,
readManifest, readManifest_, snapshotManifest,
getSnapshots, snapshotProjects, renderSnapshot,
tryCheckoutSnapshot, snapshotByDate, toFullSnapshot,
readSnapshotByName, getHeadsSnapshot, resolveSnapshot,
saveSnapshotByName, removeSnapshotByName)
import Repo.Utils (io)
-- TODO: something better than this
-- | Print a warning line to stderr.
warn :: String -> IO ()
warn = hPutStrLn stderr . ("Warning: " ++)
-- | Parse a human-friendly, git-style "approxidate" string, aborting with
-- 'error' when the date cannot be recognised.
mustParseDate :: String -> IO UTCTime
mustParseDate date = do
  parsed <- fmap posixToUTC <$> io_approxidate date
  return $ fromMaybe (error $ "can't recognize date \"" ++ date ++ "\"") parsed
-- | A snapshot name must not look like path/ref syntax: no leading dot,
-- no "..", and none of / \ : ~ ^, control or whitespace characters.
isValidSnapshotName :: String -> Bool
isValidSnapshotName name = not $ name =~ regexp
  where regexp :: String
        regexp = [r|^\.|\.\.|[\/:~^[:cntrl:][:space:]]|]
-- | Category under which the snapshot sub-commands are listed in help.
mainCategory :: String
mainCategory = "Working with snapshots"
-- | Command-line flags shared by all sub-commands.
data Options = Options { forceDate :: Bool         -- ^ treat the argument as a date even if it names a snapshot
                       , overwriteSnapshot :: Bool -- ^ allow 'save' to replace an existing snapshot
                       , resolveRefNames :: Bool   -- ^ resolve reference names before saving
                       }
-- | All flags default to off.
instance Default Options where
  def = Options { forceDate = False
                , overwriteSnapshot = False
                , resolveRefNames = False
                }
-- | Application descriptor for the ui-command framework: metadata plus the
-- sub-command list; parsed option transformers are folded left-to-right
-- onto the defaults.
app :: Application (Options -> Options) Options
app = def' { appName = "repo-snapshot"
           , appVersion = "0.1"
           , appAuthors = ["Aliaksey Artamonau <[email protected]>"]
           , appShortDesc = "short description"
           , appLongDesc = "long description"
           , appProject = "repo-utils"
           , appCategories = ["foo", mainCategory]
           , appCmds = [listCmd, checkoutCmd, saveCmd,
                        deleteCmd, showCmd, exportCmd]
           , appBugEmail = "[email protected]"
           , appOptions = options
           , appProcessConfig = processConfig
           }
  where -- Apply each option transformer in order over the default Options.
        processConfig :: Options -> [Options -> Options] -> IO Options
        processConfig z = return . foldl (flip ($)) z
        def' :: Application (Options -> Options) Options
        def' = def
-- | Flag descriptors for GetOpt; each flag is a record update on 'Options'.
-- NOTE(review): the long-option names here include a literal "--" prefix,
-- but GetOpt prepends "--" itself, so on the command line these would be
-- "----force-date" etc. — they probably ought to be "force-date",
-- "overwrite-snapshot", "resolve-refnames".  Left unchanged to preserve the
-- current CLI; verify before fixing.
options :: [OptDescr (Options -> Options)]
options = [ forceDateOpt
          , overwriteSnapshot
          , resolveRefNames
          ]
  where forceDateOpt = Option "d" ["--force-date"]
                       (NoArg $ \opts -> opts { forceDate = True })
                       "interpret command argument as a date"
        overwriteSnapshot = Option "F" ["--overwrite-snapshot"]
                            (NoArg $ \opts -> opts { overwriteSnapshot = True })
                            "overwrite snapshot if it already exists"
        resolveRefNames = Option "r" ["--resolve-refnames"]
                          (NoArg $ \opts -> opts { resolveRefNames = True })
                          "resolve reference names before saving snapshot"
-- | Expect exactly one positional argument naming an existing snapshot and
-- return it; abort with 'error' on a wrong argument count or unknown name.
argsExistingSnapshot :: App Options String
argsExistingSnapshot = do
  args <- appArgs
  case args of
    [name] ->
      runRepo $ do
        snapshotsDir <- asks repoSnapshotsDir
        io $ do
          snapshots <- getSnapshots snapshotsDir
          unless (name `elem` snapshots) $
            error $ "unknown snapshot '" ++ name ++ "'"
          return name
    _ ->
      -- TODO: would be helpful to be able to show help here
      error "bad arguments"
-- | "list" sub-command: print the names of all known snapshots.
listCmd :: Command Options
listCmd = defCmd { cmdName = "list"
                 , cmdHandler = io listHandler
                 , cmdCategory = mainCategory
                 , cmdShortDesc = "List known snapshot"
                 }
-- | Print every snapshot name found in the repo's snapshots directory.
listHandler :: IO ()
listHandler = runRepo $ do
  snapshotsDir <- asks repoSnapshotsDir
  io $ mapM_ putStrLn =<< getSnapshots snapshotsDir
-- | "checkout" sub-command: restore a snapshot by name or by date.
checkoutCmd :: Command Options
checkoutCmd = defCmd { cmdName = "checkout"
                     , cmdHandler = checkoutHandler
                     , cmdShortDesc = "Checkout snapshot by name or date"
                     , cmdCategory = mainCategory
                     }
-- | Check out either a named snapshot or the state closest to a given date.
-- The argument words are joined and treated as a snapshot name when one
-- exists (unless --force-date), otherwise as a date expression.
checkoutHandler :: App Options ()
checkoutHandler = do
  args <- appArgs
  options <- appConfig
  runRepo $ do
    stateDir <- asks repoStateDir
    snapshotsDir <- asks repoSnapshotsDir
    manifest <- readManifest
    snapshots <- io $ getSnapshots snapshotsDir
    case parseArgs args options snapshots of
      Left date ->
        handleDate manifest date
      Right snapshot ->
        handleSnapshot stateDir manifest snapshot
  where parseArgs args options snapshots
          | forceDate options = Left snapshotOrDate
          | snapshotOrDate `elem` snapshots = Right snapshotOrDate
          | otherwise = Left snapshotOrDate
          where snapshotOrDate = unwords args
        -- NOTE(review): the caller passes stateDir here although the local
        -- parameter is named snapshotsDir; other handlers read
        -- repoSnapshotsDir for the same purpose — verify which directory
        -- snapshots actually live in.
        handleSnapshot snapshotsDir manifest snapshot =
          tryCheckoutSnapshot =<< readSnapshotByName snapshotsDir snapshot projects
          where projects = snapshotProjects manifest
        -- Warn (but proceed) for projects with no commit matching the date.
        handleDate manifest date = do
          partialSnapshot <- snapshotByDate manifest =<< io (mustParseDate date)
          forM_ partialSnapshot $ \(Project{projectName}, ref) ->
            when (isNothing ref) $
              io $ warn $ "couldn't find a commit matching the date in "
                            ++ Text.unpack projectName
          tryCheckoutSnapshot (toFullSnapshot partialSnapshot)
-- | "save" sub-command: record the current heads of all projects.
saveCmd :: Command Options
saveCmd = defCmd { cmdName = "save"
                 , cmdHandler = saveHandler
                 , cmdShortDesc = "save current state of all projects"
                 , cmdCategory = mainCategory
                 }
-- | Validate the snapshot name, refuse to clobber an existing snapshot
-- unless --overwrite-snapshot is set, optionally resolve symbolic refs
-- (--resolve-refnames), then persist the heads under the given name.
saveHandler :: App Options ()
saveHandler = do
  args <- appArgs
  options <- appConfig
  case args of
    [name] ->
      runRepo $ do
        snapshotsDir <- asks repoSnapshotsDir
        projects <- snapshotProjects <$> readManifest
        snapshots <- io $ getSnapshots snapshotsDir
        unless (isValidSnapshotName name) $
          error $ "invalid snapshot name '" ++ name ++ "'"
        when (name `elem` snapshots && not (overwriteSnapshot options)) $
          error $ "snapshot '" ++ name ++ "' already exists"
        heads <- getHeadsSnapshot projects >>= \hs ->
                   if resolveRefNames options
                   then resolveSnapshot hs
                   else return hs
        io $ saveSnapshotByName snapshotsDir name heads
    _ ->
      -- TODO: would be helpful to be able to show help here
      error "bad arguments"
-- | "delete" sub-command: remove a named snapshot.
deleteCmd :: Command Options
deleteCmd = defCmd { cmdName = "delete"
                   , cmdHandler = deleteHandler
                   , cmdShortDesc = "delete named snapshot"
                   , cmdCategory = mainCategory
                   }
-- | Remove the named snapshot.
-- NOTE(review): this reads repoStateDir where save/show/export read
-- repoSnapshotsDir — verify whether the two are the same directory.
deleteHandler :: App Options ()
deleteHandler = do
  name <- argsExistingSnapshot
  runRepo $ do
    snapshotsDir <- asks repoStateDir
    io $ removeSnapshotByName snapshotsDir name
-- | "show" sub-command: pretty-print a snapshot's contents.
showCmd :: Command Options
showCmd = defCmd { cmdName = "show"
                 , cmdHandler = showHandler
                 , cmdShortDesc = "show snapshot"
                 , cmdCategory = mainCategory
                 }
-- | Read the named snapshot against the manifest's projects and render it.
showHandler :: App Options ()
showHandler = do
  name <- argsExistingSnapshot
  runRepo $ do
    snapshotsDir <- asks repoSnapshotsDir
    projects <- snapshotProjects <$> readManifest
    snapshot <- readSnapshotByName snapshotsDir name projects
    io $ Text.putStrLn $ renderSnapshot snapshot
-- | "export" sub-command: emit a snapshot as a manifest XML document.
exportCmd :: Command Options
exportCmd = defCmd { cmdName = "export"
                   , cmdHandler = exportHandler
                   , cmdShortDesc = "export snapshot as manifest"
                   , cmdCategory = mainCategory
                   }
-- | Splice the named snapshot into the manifest's XML and print the result
-- to stdout.
exportHandler :: App Options ()
exportHandler = do
  name <- argsExistingSnapshot
  runRepo $ do
    snapshotsDir <- asks repoSnapshotsDir
    (xml, manifest) <- readManifest_
    snapshot <- readSnapshotByName snapshotsDir name (snapshotProjects manifest)
    io $ putStrLn $ showTopElement (snapshotManifest snapshot xml)
-- | Entry point: dispatch to the sub-commands declared in 'app'.
main :: IO ()
main = appMainWithOptions app
| aartamonau/repo-bisect | src/Snapshot.hs | gpl-3.0 | 9,201 | 0 | 22 | 2,784 | 2,054 | 1,121 | 933 | 207 | 3 |
module HipSpec.Literal (trLiteral) where
import HipSpec.Theory
import HipSpec.Translate
import HipSpec.Property
import HipSpec.Lang.PolyFOL
import HipSpec.Id
-- | Translate an equational literal @e1 :=: e2@ into a first-order equality
-- formula, translating each side with the given arity map @am@ and the
-- identifiers in scope @sc@.
trLiteral :: ArityMap -> [Id] -> Literal -> Formula LogicId LogicId
trLiteral am sc (e1 :=: e2) = trSimpExpr am sc e1 === trSimpExpr am sc e2
| danr/hipspec | src/HipSpec/Literal.hs | gpl-3.0 | 304 | 0 | 8 | 45 | 99 | 54 | 45 | 8 | 1 |
{-# LANGUAGE OverloadedStrings, FlexibleContexts #-}
module AnsiParser.Parser where
import AnsiParser.Types
-- import Text.ParserCombinators.Parsec.Token
-- import Text.Parsec
-- import Text.Parsec.Char (oneOf, satisfy)
-- import Text.Parsec.ByteString (Parser)
-- import Data.String (IsString)
-- -- import Data.ByteString.Char8 (ByteString)
-- import Data.Char (isControl)
-- -- isEscapeCode :: IsString a => a -> Bool
-- -- isEscapeCode = (`elem` "\x027")
-- -- TODO: Other option here?
-- escapeCode :: (ConsoleString a, Stream s m a) => ParsecT s u m a
-- escapeCode = satisfy isEscape
-- -- commandBody :: (Stream s m Char) => ParsecT s u m Char
-- -- commandBody = satisfy $ not . isControl
-- -- -- TODO: set reservedNames to hold the single-character codes
-- -- ansiTokenParser :: (IsString a, Stream s m a) => GenTokenParser s u m
-- -- ansiTokenParser = makeTokenParser $
-- -- LanguageDef { commentStart = ""
-- -- , commentEnd = ""
-- -- , commentLine = ""
-- -- , nestedComments = False
-- -- , identStart = parserZero
-- -- , identLetter = parserZero
-- -- , opStart = escapeCode
-- -- , opLetter = commandBody
-- -- , reservedNames = []
-- -- , reservedOpNames = []
-- -- , caseSensitive = True }
-- -- plainText :: Parser (Expr a)
-- -- plainText = do
-- -- body <- manyTill anyToken escapeCode
-- -- return $ Plain body
-- -- tokenParser :: Stream s m Char => GenLanguageDef s u m -> GenTokenParser s u m
-- -- tokenParser = makeTokenParser
| pscollins/ansi-parser | src/AnsiParser/Parser.hs | gpl-3.0 | 1,796 | 0 | 4 | 587 | 46 | 42 | 4 | 3 | 0 |
{-# LANGUAGE DeriveGeneric #-}
import Text.Printf (printf)
import Data.Aeson
import GHC.Generics
import Network.HTTP.Conduit (simpleHttp)
import Data.Text (Text)
import qualified Data.Text as T
import Data.List (maximumBy, sortBy)
import Data.Function (on)
-- | One crew member from the Open Notify "astros" response.
data Person =
  Person { name :: Text   -- ^ full name
         , craft :: Text  -- ^ spacecraft the person is aboard
         } deriving (Show, Generic)
instance FromJSON Person
instance ToJSON Person
-- | Top-level payload of the astros endpoint.
data InSpace =
  InSpace { people :: [Person]  -- ^ everyone currently in space
          , number :: Int       -- ^ crew count as reported by the API
          , message :: !Text    -- ^ API status message (strict field)
          } deriving (Show, Generic)
instance FromJSON InSpace
instance ToJSON InSpace
-- | Fetch the current "astronauts in space" document and decode it;
-- 'Nothing' when the response does not match 'InSpace'.
getInSpace :: IO (Maybe InSpace)
getInSpace = decode <$> simpleHttp astrosUrl
  where astrosUrl = "http://api.open-notify.org/astros.json"
-- | Render the report: a headline with the crew count, then a name/craft
-- table.  Rows are sorted by the last word of each name (surname), and each
-- column is right-padded to its widest entry.
-- NOTE(review): 'last . words' errors on an empty/whitespace-only name, and
-- 'maximumBy' errors on an empty crew list — assumes non-degenerate data.
format :: InSpace -> String
format is =
  header ++ concatMap row peeps
  where
    numPeeps = show $ number is
    -- Sort by surname: the final whitespace-separated word of the name.
    peeps = sortBy (compare `on` (last . words . T.unpack . name)) $ people is
    header = "There are " ++ numPeeps ++ " people in space at the moment\n"
             ++ printf nameColFmt "Name"
             ++ printf craftColFmt "Craft"
             ++ "\n--------------------------------\n"
    row p = nameFmt p ++ craftFmt p
    nameFmt p = printf nameColFmt (T.unpack $ name p)
    craftFmt p = printf craftColFmt (T.unpack $ craft p)
    -- Column widths follow the longest name/craft currently present.
    nameColFmt = "%" ++ (show $ longestBy name peeps) ++ "s | "
    craftColFmt = "%" ++ (show $ longestBy craft peeps) ++ "s \n"
    longestBy f = (lengthBy f) . maximumBy (compare `on` (lengthBy f))
    lengthBy f = T.length . f
-- | Entry point: fetch the live data and print the table; aborts when the
-- API response cannot be decoded.
main :: IO ()
main = do
  i <- getInSpace
  case i of
    Just is -> putStrLn $ format is
    Nothing -> error "API problem"
| ciderpunx/57-exercises-for-programmers | src/P47InSpace.hs | gpl-3.0 | 1,733 | 0 | 14 | 506 | 547 | 286 | 261 | 49 | 2 |
import Data.List( nub, foldl' )
import Control.Monad( liftM, replicateM_ )
-- | Naive factorial of n.
fact1 n = foldr (*) 1 [1 .. n]
-- | Trailing zeros of n!, computed the brute-force way: build the full
-- factorial and count the '0' digits at the end of its decimal rendering.
z1 n = length (takeWhile ('0' ==) (reverse (show (fact1 n))))
-- | True when n is divisible by 2 or by 5 — the only factors that can
-- contribute trailing zeros.
check3 n = any (\d -> n `mod` d == 0) [2, 5]
-- | Product of just the multiples of 2 or 5 in [2..n]; its trailing zeros
-- coincide with those of n!.
fact3 n = product [k | k <- [2 .. n], check3 k]
-- | Trailing-zero count of n! via the reduced product.
z3 n = length (takeWhile ('0' ==) (reverse (show (fact3 n))))
-- | Bump the count of key k in an association list; unseen keys are
-- prepended with count 1.  The resulting list ORDER matters downstream
-- (primeDecomp's output order), so do not replace with Data.Map.
incDic [] k = [(k,1)]
incDic ((k1,v):xs) k
  | k1 == k = (k1,v+1):xs
  | otherwise = (k,1):(k1,v):xs
-- | Factor n over the primes [2,5] only, returning (prime, multiplicity)
-- pairs; all other prime factors are ignored.
primeDecomp n = primeDecomp' n [2,5] []
primeDecomp' 1 _ d = d
primeDecomp' _ [] d = d
primeDecomp' n (p:ps) d
  | mm == 0 = primeDecomp' dd (p:ps) (incDic d p)
  | otherwise = primeDecomp' n ps d
  where (dd,mm) = n `divMod` p
-- | (exponent of 2, exponent of 5) in the factorisation of a single n.
countMin n = count2_5 (primeDecomp n) (0,0)
count2_5 [] val = val
count2_5 ((i,n):xs) (v2,v5)
  | i == 2 = count2_5 xs (v2+n,v5)
  | otherwise = count2_5 xs (v2,v5+n)
-- | Fold one factor's (2s, 5s) counts into a running pair.
sum11 (a2,b2) a = (a1+a2,b1+b2)
  where (a1,b1) = countMin a
-- | Total exponents of 2 and 5 in n!, summed over [2..n] (strict fold).
fact5 :: Int -> (Int,Int)
fact5 n = foldl' sum11 (0,0) [2..n]
-- | Trailing zeros of n! = min(exponent of 2, exponent of 5).
z5 = (\(a,b) -> min a b ) . fact5
-- | Read one value of n from stdin and print the trailing-zero count of n!.
testCase = getLine >>= print . z5 . read
-- | First input line gives the number of test cases to run.
main = getLine >>= flip replicateM_ testCase . read
| zhensydow/ljcsandbox | lang/haskell/spoj/FCTRL.hs | gpl-3.0 | 1,139 | 0 | 11 | 270 | 693 | 368 | 325 | 36 | 1 |
module PropT10 where
import Prelude(Bool(..))
import Zeno
-- Definitions
-- Boolean connectives redefined locally (only Bool itself is imported from
-- Prelude), so the prover reasons about exactly these equations.
True && x = x
_ && _ = False
False || x = x
_ || _ = True
not True = False
not False = True
-- Nats
-- Peano naturals with primitive-recursive arithmetic; these definitions are
-- the objects the theorem prover works over, so their exact shape matters.
data Nat = S Nat | Z
-- Addition and multiplication by recursion on the first argument.
(+) :: Nat -> Nat -> Nat
Z + y = y
(S x) + y = S (x + y)
(*) :: Nat -> Nat -> Nat
Z * _ = Z
(S x) * y = y + (x * y)
-- Structural equality and its negation.
(==),(/=) :: Nat -> Nat -> Bool
Z == Z = True
Z == _ = False
S _ == Z = False
S x == S y = x == y
x /= y = not (x == y)
(<=) :: Nat -> Nat -> Bool
Z <= _ = True
_ <= Z = False
S x <= S y = x <= y
one, zero :: Nat
zero = Z
one = S Z
double :: Nat -> Nat
double Z = Z
double (S x) = S (S (double x))
even :: Nat -> Bool
even Z = True
even (S Z) = False
even (S (S x)) = even x
-- Integer halving (rounds down).
half :: Nat -> Nat
half Z = Z
half (S Z) = Z
half (S (S x)) = S (half x)
-- Accumulator (tail-recursive) variants paired with the plain definitions
-- above for equivalence theorems: mult/(*), qfac/fac, qexp/exp.
mult :: Nat -> Nat -> Nat -> Nat
mult Z _ acc = acc
mult (S x) y acc = mult x y (y + acc)
fac :: Nat -> Nat
fac Z = S Z
fac (S x) = S x * fac x
qfac :: Nat -> Nat -> Nat
qfac Z acc = acc
qfac (S x) acc = qfac x (S x * acc)
exp :: Nat -> Nat -> Nat
exp _ Z = S Z
exp x (S n) = x * exp x n
qexp :: Nat -> Nat -> Nat -> Nat
qexp x Z acc = acc
qexp x (S n) acc = qexp x n (x * acc)
-- Lists
-- List combinators redefined over the local Nat; again the exact recursive
-- shape is what the prover manipulates.
length :: [a] -> Nat
length [] = Z
length (_:xs) = S (length xs)
(++) :: [a] -> [a] -> [a]
[] ++ ys = ys
(x:xs) ++ ys = x : (xs ++ ys)
drop :: Nat -> [a] -> [a]
drop Z xs = xs
drop _ [] = []
drop (S x) (_:xs) = drop x xs
-- Quadratic reverse and its linear accumulator variant.
rev :: [a] -> [a]
rev [] = []
rev (x:xs) = rev xs ++ [x]
qrev :: [a] -> [a] -> [a]
qrev [] acc = acc
qrev (x:xs) acc = qrev xs (x:acc)
-- Reverse-and-flatten of a list of lists, plus its accumulator variant.
revflat :: [[a]] -> [a]
revflat [] = []
revflat ([]:xss) = revflat xss
revflat ((x:xs):xss) = revflat (xs:xss) ++ [x]
qrevflat :: [[a]] -> [a] -> [a]
qrevflat [] acc = acc
qrevflat ([]:xss) acc = qrevflat xss acc
qrevflat ((x:xs):xss) acc = qrevflat (xs:xss) (x:acc)
rotate :: Nat -> [a] -> [a]
rotate Z xs = xs
rotate _ [] = []
rotate (S n) (x:xs) = rotate n (xs ++ [x])
elem :: Nat -> [Nat] -> Bool
elem _ [] = False
elem n (x:xs) = n == x || elem n xs
-- NOTE(review): this checks membership of x in xs rather than ys, which
-- looks like a typo; it is left untouched because the definitions in this
-- file are prover input (and the theorem below does not use subset).
subset :: [Nat] -> [Nat] -> Bool
subset [] ys = True
subset (x:xs) ys = x `elem` xs && subset xs ys
intersect,union :: [Nat] -> [Nat] -> [Nat]
(x:xs) `intersect` ys | x `elem` ys = x:(xs `intersect` ys)
                      | otherwise = xs `intersect` ys
[] `intersect` ys = []
union (x:xs) ys | x `elem` ys = union xs ys
                | otherwise = x:(union xs ys)
union [] ys = ys
-- Insertion sort over Nat.
isort :: [Nat] -> [Nat]
isort [] = []
isort (x:xs) = insert x (isort xs)
insert :: Nat -> [Nat] -> [Nat]
insert n [] = [n]
insert n (x:xs) =
  case n <= x of
    True -> n : x : xs
    False -> x : (insert n xs)
count :: Nat -> [Nat] -> Nat
count n (x:xs) | n == x = S (count n xs)
               | otherwise = count n xs
count n [] = Z
sorted :: [Nat] -> Bool
sorted (x:y:xs) = x <= y && sorted (y:xs)
sorted _ = True
-- Theorem
-- | The property submitted to the prover: reversal is an involution.
prop_T10 :: [a] -> Prop
prop_T10 x = prove (rev (rev x) :=: x)
| danr/hipspec | testsuite/prod/zeno_version/PropT10.hs | gpl-3.0 | 2,957 | 0 | 10 | 913 | 1,989 | 1,035 | 954 | 114 | 2 |
{-# LANGUAGE OverloadedStrings #-}
module TypeCheck where
import AST
import qualified Data.Map as M
import Control.Monad
import Control.Monad.Reader
import Control.Applicative
import Data.Ord
import qualified Data.List as L
import Prelude hiding (lookup)
-- | Reverse application: feed a value into a function.
x >>> f = f x
-- | Disabled debug trace: ignores the message, returns the value.
trace _ value = value
-- | Map lookup that reports a missing key (and dumps the whole map)
-- as a 'Left' error message instead of returning 'Nothing'.
lookup key table =
  case M.lookup key table of
    Just v  -> Right v
    Nothing -> Left $ show key ++ " not found in map: " ++ show table
-- | Turn a Bool into an Either: True succeeds (with an unusable
-- placeholder payload), False fails with the message "False".
toEither b
  | b         = Right undefined
  | otherwise = Left "False"
-- | Promote a Maybe to an Either, attaching the given error to Nothing.
fromMaybe onError = maybe (Left onError) Right
-- | Succeed only when the (already canonical) type is the named type
-- @int@; anything else produces a descriptive error.
confirmInt (NamedType i)
  | i == "int" = Right (NamedType i)
  | otherwise  = Left $ show i ++ " is not an int"
confirmInt t = Left $ show t ++ " is not an int :35"
-- | Succeed only for the named types @int@ and @string@ (the two types
-- the ordering comparisons accept).
confirmIntString (NamedType t)
  | t == "int" || t == "string" = Right (NamedType t)
  | otherwise = Left $ show t ++ " is neither int nor string"
confirmIntString x = Left $ show x ++ " is neither int nor string"
-- | Type of an arithmetic/boolean operator application: both operands
-- must typecheck as @int@; the result is @int@.
--
-- BUG FIX: the original wrote @confirmInt <$> typeCheck e@, which only
-- *computes* the @Either@ verdict and then discards it, so a non-int
-- operand was never rejected.  Bind the verdict through 'lift' instead
-- (the same pattern 'isInteger' below already uses) so that failure
-- aborts checking.
typeCheckArith e1 e2 = do
  trace ("confirmInt: " ++ show e1) $ typeCheck e1 >>= (lift . confirmInt)
  trace ("confirmInt: " ++ show e2) $ typeCheck e2 >>= (lift . confirmInt)
  trace "" $ return $ (NamedType "int")
-- Takes a type and resolves any type aliases
-- Since record types ignore structural equivalence, they are always
-- canonical
canonicalize :: Type -> ReaderT (M.Map Identifier Type) (Either String) Type
-- Chase NamedType aliases through the environment until one of the two
-- root types is reached; an unknown alias is reported by 'lookup'.
canonicalize (NamedType name) = if L.elem name rootTypes
  then return (NamedType name)
  else ask >>= (lift . lookup name) >>= canonicalize
  where rootTypes = ["int", "string"]
canonicalize r@(RecType _) = return r
-- Composite types canonicalize component-wise.
canonicalize (ArrType t) = ArrType <$> canonicalize t
canonicalize (FuncType args ret) = FuncType <$> mapM canonicalize args <*> canonicalize ret
canonicalize Top = return Top
canonicalize VoidT = return VoidT
-- | Merge two types after canonicalizing them; the String argument is a
-- context prefix for the error message.
mergeTypes :: String -> Type -> Type -> ReaderT (M.Map Identifier Type) (Either String) Type
-- If one of the arguments is nil (type Top), it does match a record
mergeTypes' _ r@(RecType ps) Top = return r
mergeTypes' _ Top r@(RecType ps) = return r
-- Equal canonical types merge to themselves; anything else is an error.
-- (Cleanup: the original built this with a 'case' whose result was
-- discarded, followed by a redundant 'return t1'.)
mergeTypes' msg t1 t2
  | t1 == t2  = return t1
  | otherwise = lift . Left $ msg ++ " Couldn't merge " ++ show t1 ++ " and " ++ show t2
mergeTypes msg t1 t2 = do
  t1' <- canonicalize t1
  t2' <- canonicalize t2
  mergeTypes' msg t1' t2'
-- NOTE(review): this looks like dead/vestigial code.  The expression
-- @mergeTypes "" <$> typeCheck e <*> t@ merely *constructs* the inner
-- merge action (of type @m Type@) and the @_ <-@ binder discards it
-- without ever running it, so no mismatch can be reported from here.
-- The result is the unevaluated action @t@ itself, not a Type.
-- Confirm there are no callers before relying on (or fixing) it.
matchesType e t = do
  _ <- mergeTypes "" <$> typeCheck e <*> t
  return t
-- | Typecheck both expressions and require their types to merge; the
-- error message shows both expressions and the current environment.
sameType :: Show a => Expr a -> Expr a -> ReaderT (M.Map Identifier Type) (Either String) Type
sameType e1 e2 = do
  lhsTy <- typeCheck e1
  rhsTy <- typeCheck e2
  env <- ask
  let ctx = "checking that " ++ show e1 ++ " and " ++ show e2
            ++ " have the same type in environment " ++ show env
  mergeTypes ctx lhsTy rhsTy
-- | Typecheck an expression and require the result to be @int@.
isInteger e = trace ("isInteger? " ++ show e) $ do
  ty <- typeCheck e
  lift (confirmInt ty)
-- | Insert a list of key/value pairs into a map (later pairs win).
insertMany pairs table = foldl (\acc (k, v) -> M.insert k v acc) table pairs
-- Short-hands for the checker's two built-in base types.
string = NamedType "string"
int = NamedType "int"
-- Types of the built-in (standard library) functions; this map is the
-- initial environment used by 'typeCheckTiger'.
standardLibraryTypes = M.fromList [
  ("print",FuncType [string] VoidT),
  ("flush",FuncType [] VoidT),
  ("getchar",FuncType [] string),
  ("ord",FuncType [string] int),
  ("chr",FuncType [int] string),
  ("size",FuncType [string] int),
  ("substring", FuncType [string,int,int] string),
  ("concat",FuncType [string,string] string),
  ("not", FuncType [int] int),
  ("exit", FuncType [int] VoidT)]
-- | Entry point: typecheck a whole program in an environment seeded
-- with 'standardLibraryTypes'.
typeCheckTiger :: Show a => Expr a -> Either String Type
typeCheckTiger prog = runReaderT (typeCheck prog) standardLibraryTypes
-- typecheck takes an expression, and returns the type of the expression
-- if it has a valid type
typeCheck :: Show a => Expr a -> ReaderT (M.Map Identifier Type) (Either String) Type
-- A bare identifier: look its type up in the environment.
typeCheck (LValueId i _) = ask >>= (lift . lookup i)
-- Record field projection: the record type must canonicalize to RecType
-- and carry the requested field.
typeCheck (LValueField rec fieldName _) = do
  -- Nothing <- (error . show <$> ask)
  recTy <- typeCheck rec >>= canonicalize
  case recTy of
    (RecType fields) -> lift
      $ fromMaybe ("No field " ++ show fieldName
                   ++ " in record " ++ show recTy)
      $ L.lookup fieldName fields
    Top -> lift $ Left "Nil type"
    t -> lift $ Left $ "Not a record: " ++ show t ++ " in " ++ show rec ++ "." ++ show fieldName
-- Array subscript: subscript must be int, result is the element type.
typeCheck (LValueSubscript arr subscript _) = do
  subTy <- typeCheck subscript
  arrTy <- typeCheck arr
  case arrTy of
    ArrType t -> (lift $ confirmInt subTy) >> return t
    _ -> lift $ Left $ show arrTy ++ " is not an array type"
typeCheck (Nil _) = return Top
-- A sequence has the type of its last expression.
typeCheck (Seq es _) = mapM typeCheck es >>= (return . last)
typeCheck (Void _) = return VoidT
typeCheck (IntLit i _) = return $ NamedType "int"
typeCheck (StringLit _ _) = return $ NamedType "string"
typeCheck (Negation i _) = typeCheck i >>= (lift . confirmInt)
-- Call: argument types must match the declared parameter types exactly
-- (after canonicalization); result is the declared return type.
typeCheck (FunctionCall funcName args _) = do
  FuncType paramTypes retType <- ask >>= (lift . lookup funcName)
  argTypes <- mapM (typeCheck >=> canonicalize) args
  paramTypes' <- mapM canonicalize paramTypes
  if argsMatch paramTypes' argTypes
    then return retType
    else lift . Left $ "Argument types don't match in call of " ++ show funcName ++ " Expected: " ++ show paramTypes' ++ " Got: " ++ show argTypes
  where argsMatch p a = length p == length a && (and $ L.zipWith (==) p a)
typeCheck (Add e1 e2 _) = typeCheckArith e1 e2
typeCheck (Sub e1 e2 _) = typeCheckArith e1 e2
typeCheck (Mult e1 e2 _) = typeCheckArith e1 e2
typeCheck (Div e1 e2 _) = typeCheckArith e1 e2
-- Equality works on any pair of equal types; other comparisons only on
-- int/string (see the next equation).  Both yield int.
typeCheck (Comp Eq e1 e2 _) = sameType e1 e2 >> (return $ NamedType "int")
typeCheck (Comp _ e1 e2 _) = do
  t1 <- typeCheck e1
  t2 <- typeCheck e2
  lift $ confirmIntString t1
  lift $ confirmIntString t2
  if t1 == t2
    then return t1
    else lift $ Left "Can't compare values of different type"
typeCheck (And e1 e2 _) = typeCheckArith e1 e2
typeCheck (Or e1 e2 _) = typeCheckArith e1 e2
-- Record creation: the named type must be a record and the field list
-- (compared sorted by name) must match the declared parameters.
typeCheck (Record typeId fields _) = do
  recType <- ask >>= (lift . lookup typeId)
  case recType of
    RecType params -> argsMatch fields params >> return recType
    _ -> lift . Left $ show recType ++ " is not a record type"
  where argsMatch fields params = do
          let ls = length fields == length params
          ts <- typesMatch
                (map snd $ L.sortBy (comparing fst) fields)
                (map snd $ L.sortBy (comparing fst) params)
          if ls && ts
            then lift $ Right undefined
            else lift $ Left $ "Arguments don't match in creation of record " ++ show typeId
        typesMatch fields params = (==) params <$> mapM typeCheck fields
-- Array creation: length must be int and the named type must merge
-- with an array of the initializer's type.
typeCheck (Array typeId len val _) = do
  isInteger len
  arrType <- ask >>= (lift . lookup typeId)
  valType <- typeCheck val
  mergeTypes ("Array of " ++ show valType) (ArrType valType) arrType
  return arrType
typeCheck (Assignment lval rval _) = do
  sameType lval rval
  return VoidT
typeCheck (IfThenElse c t f _) = do
  isInteger c
  sameType t f
-- One-armed if and while: the body must have the same type as Void.
typeCheck (IfThen c t d) = do
  isInteger c
  sameType t $ Void d
typeCheck (While c a d) = do
  isInteger c
  sameType a $ Void d
-- For: bounds are ints; the loop variable is bound to int in the body.
typeCheck (For i start end body d) = do
  isInteger start
  isInteger end
  local (M.insert i (NamedType "int")) $ typeCheck body
typeCheck (Break _) = return VoidT
typeCheck (Let ds e _) = letCheck (splitDeclarations ds) e
-- | Check a let-body under groups of declarations (as produced by
-- 'splitDeclarations'): each group extends the environment for the
-- remaining groups and, finally, for the body expression.
letCheck :: Show a => [[Decl a]] -> Expr a -> ReaderT (M.Map Identifier Type) (Either String) Type
-- A group of type declarations: bind all names at once so the aliases
-- can refer to each other.
letCheck (ts@(TypeDec _ _ _ _:_):ds) e = do
  let bindings = map extractSigs ts
  newEnv <- insertMany bindings <$> ask
  local (const newEnv) $ letCheck ds e
  where extractSigs (TypeDec _ i t _) = (i,t)
        extractSigs _ = error "Encountered a non-type binding"
-- Variable declarations are checked one at a time, left to right.
letCheck ((VarDec _ i ve _:vs):ds) e = do
  t <- typeCheck ve
  local (M.insert i t) $ letCheck (vs:ds) e
-- Annotated variable: the initializer must merge with the annotation.
letCheck ((TVarDec _ i t v _:vs):ds) e = do
  typeCheck v >>= mergeTypes ("Type of variable " ++ show i) t
  local (M.insert i t) $ letCheck (vs:ds) e
-- Function groups are handled together to allow (mutual) recursion.
letCheck (fs@(FunDec _ _ _ _ _:_):ds) e = letFCheck fs ds e
letCheck (fs@(TFunDec _ _ _ _ _ _:_):ds) e = letFCheck fs ds e
letCheck ([]:ds) e = letCheck ds e
letCheck [] e = typeCheck e
--letCheck ds e = error $ "Encountered unexpected pattern: ds=" ++ show ds ++ "\te=" ++ show e
-- | Check a group of function declarations: first bind every function's
-- signature (so the bodies may call each other), then check each body,
-- then check the rest of the let.
-- NOTE(review): untyped FunDec signatures are seeded with a VoidT
-- return type here; the real body type is only computed later in
-- 'typeCheckDecl'.
letFCheck :: Show a => [Decl a] -> [[Decl a]] -> Expr a -> ReaderT (M.Map Identifier Type) (Either String) Type
letFCheck funcs ds e = do
  let bindings = map extractSig funcs
  newEnv <- insertMany bindings <$> ask
  local (const newEnv) $ mapM typeCheckDecl funcs
  local (const newEnv) $ letCheck ds e
  where extractSig (FunDec _ i args _ _) = (i,FuncType (map snd args) VoidT)
        extractSig (TFunDec _ i args r _ _) = (i,FuncType (map snd args) r)
        -- NOTE(review): 'typeCheckFun' below is never used -- the body
        -- calls 'typeCheckDecl' instead (which, unlike this helper,
        -- also binds the parameters).  Looks like dead leftover code;
        -- confirm before removing.
        typeCheckFun (FunDec _ _ _ _ e) = typeCheck e
        typeCheckFun (TFunDec _ i _ r e _) = typeCheck e >>= mergeTypes ("Return type of function " ++ show i) r
-- | Group adjacent declarations of the same kind: type declarations
-- together, (typed or untyped) variables together, and (typed or
-- untyped) functions together.
splitDeclarations :: [Decl a] -> [[Decl a]]
splitDeclarations = L.groupBy sameKind
  where sameKind a b = case (kindOf a, kindOf b) of
                         (Just x, Just y) -> x == y
                         _ -> False
        -- 0 = type, 1 = variable, 2 = function
        kindOf :: Decl a -> Maybe Int
        kindOf (TypeDec _ _ _ _) = Just 0
        kindOf (VarDec _ _ _ _) = Just 1
        kindOf (TVarDec _ _ _ _ _) = Just 1
        kindOf (FunDec _ _ _ _ _) = Just 2
        kindOf (TFunDec _ _ _ _ _ _) = Just 2
        kindOf _ = Nothing
-- | Check a single declaration and return the environment extended with
-- its binding.
typeCheckDecl :: (Show a) => Decl a -> ReaderT (M.Map Identifier Type) (Either String) (M.Map Identifier Type)
typeCheckDecl (TypeDec _ i t _) = M.insert i t <$> ask
typeCheckDecl (VarDec _ i e _) = M.insert i <$> typeCheck e <*> ask
-- Annotated variable: initializer must merge with the annotation.
typeCheckDecl (TVarDec _ i t e _) = M.insert i <$> (typeCheck e >>= mergeTypes ("In variable declaration " ++ show i ++ "=" ++ show e) t) <*> ask
-- Untyped function: bind the parameters, infer the body type, and
-- record the function with that inferred return type.
typeCheckDecl (FunDec _ i args body _) = do
  bodyType <- local (insertMany args) $ typeCheck body
  M.insert i (FuncType (map snd args) bodyType) <$> ask
typeCheckDecl (TFunDec _ i args rt body _) = do
bodyType <- local (insertMany args) $ typeCheck body
return . toEither $ bodyType == rt
M.insert i (FuncType (map snd args) bodyType) <$> ask | joelwilliamson/modern-compilers-exercises | TypeCheck.hs | gpl-3.0 | 10,156 | 96 | 18 | 2,415 | 4,340 | 2,112 | 2,228 | 213 | 10 |
{-# LANGUAGE DeriveDataTypeable #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE TypeFamilies #-}
{-# OPTIONS_GHC -fno-warn-unused-imports #-}
{-# OPTIONS_GHC -fno-warn-unused-binds #-}
{-# OPTIONS_GHC -fno-warn-unused-matches #-}
-- Derived from AWS service descriptions, licensed under Apache 2.0.
-- |
-- Module : Network.AWS.IAM.ListVirtualMFADevices
-- Copyright : (c) 2013-2015 Brendan Hay
-- License : Mozilla Public License, v. 2.0.
-- Maintainer : Brendan Hay <[email protected]>
-- Stability : auto-generated
-- Portability : non-portable (GHC extensions)
--
-- Lists the virtual MFA devices under the AWS account by assignment
-- status. If you do not specify an assignment status, the action returns a
-- list of all virtual MFA devices. Assignment status can be 'Assigned',
-- 'Unassigned', or 'Any'.
--
-- You can paginate the results using the 'MaxItems' and 'Marker'
-- parameters.
--
-- /See:/ <http://docs.aws.amazon.com/IAM/latest/APIReference/API_ListVirtualMFADevices.html AWS API Reference> for ListVirtualMFADevices.
--
-- This operation returns paginated results.
module Network.AWS.IAM.ListVirtualMFADevices
(
-- * Creating a Request
listVirtualMFADevices
, ListVirtualMFADevices
-- * Request Lenses
, lvmdAssignmentStatus
, lvmdMarker
, lvmdMaxItems
-- * Destructuring the Response
, listVirtualMFADevicesResponse
, ListVirtualMFADevicesResponse
-- * Response Lenses
, lvmdrsMarker
, lvmdrsIsTruncated
, lvmdrsResponseStatus
, lvmdrsVirtualMFADevices
) where
import Network.AWS.IAM.Types
import Network.AWS.IAM.Types.Product
import Network.AWS.Pager
import Network.AWS.Prelude
import Network.AWS.Request
import Network.AWS.Response
-- | /See:/ 'listVirtualMFADevices' smart constructor.
-- Request payload; all three fields are optional query parameters
-- (see the lenses below).
data ListVirtualMFADevices = ListVirtualMFADevices'
    { _lvmdAssignmentStatus :: !(Maybe AssignmentStatusType)
    , _lvmdMarker           :: !(Maybe Text)
    , _lvmdMaxItems         :: !(Maybe Nat)
    } deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Creates a value of 'ListVirtualMFADevices' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'lvmdAssignmentStatus'
--
-- * 'lvmdMarker'
--
-- * 'lvmdMaxItems'
-- Every field starts out as 'Nothing' (AWS applies its own defaults).
listVirtualMFADevices
    :: ListVirtualMFADevices
listVirtualMFADevices =
    ListVirtualMFADevices'
    { _lvmdAssignmentStatus = Nothing
    , _lvmdMarker = Nothing
    , _lvmdMaxItems = Nothing
    }
-- | The status (unassigned or assigned) of the devices to list. If you do
-- not specify an 'AssignmentStatus', the action defaults to 'Any' which
-- lists both assigned and unassigned virtual MFA devices.
lvmdAssignmentStatus :: Lens' ListVirtualMFADevices (Maybe AssignmentStatusType)
lvmdAssignmentStatus = lens _lvmdAssignmentStatus (\ s a -> s{_lvmdAssignmentStatus = a});
-- | Use this parameter only when paginating results and only after you have
-- received a response where the results are truncated. Set it to the value
-- of the 'Marker' element in the response you just received.
lvmdMarker :: Lens' ListVirtualMFADevices (Maybe Text)
lvmdMarker = lens _lvmdMarker (\ s a -> s{_lvmdMarker = a});
-- | Use this only when paginating results to indicate the maximum number of
-- items you want in the response. If there are additional items beyond the
-- maximum you specify, the 'IsTruncated' response element is 'true'.
--
-- This parameter is optional. If you do not include it, it defaults to
-- 100.
-- ('mapping _Nat' exposes the internal 'Nat' wrapper as a 'Natural'.)
lvmdMaxItems :: Lens' ListVirtualMFADevices (Maybe Natural)
lvmdMaxItems = lens _lvmdMaxItems (\ s a -> s{_lvmdMaxItems = a}) . mapping _Nat;
-- Pagination: keep issuing requests with the previous response's
-- 'Marker'; stop once the marker or the device list triggers 'stop'.
instance AWSPager ListVirtualMFADevices where
        page rq rs
          | stop (rs ^. lvmdrsMarker) = Nothing
          | stop (rs ^. lvmdrsVirtualMFADevices) = Nothing
          | otherwise =
            Just $ rq & lvmdMarker .~ rs ^. lvmdrsMarker
-- Wire format: POST query against the IAM endpoint; the response is
-- decoded from the "ListVirtualMFADevicesResult" XML wrapper, with the
-- HTTP status code stored via 'fromEnum' and a missing device list
-- defaulting to 'mempty'.
instance AWSRequest ListVirtualMFADevices where
        type Rs ListVirtualMFADevices =
             ListVirtualMFADevicesResponse
        request = postQuery iAM
        response
          = receiveXMLWrapper "ListVirtualMFADevicesResult"
              (\ s h x ->
                 ListVirtualMFADevicesResponse' <$>
                   (x .@? "Marker") <*> (x .@? "IsTruncated") <*>
                     (pure (fromEnum s))
                     <*>
                     (x .@? "VirtualMFADevices" .!@ mempty >>=
                        parseXMLList "member"))
-- No extra headers are sent, and the request path is always the root.
instance ToHeaders ListVirtualMFADevices where
        toHeaders _ = mempty
instance ToPath ListVirtualMFADevices where
        toPath _ = "/"
-- Query-string serialization: fixed Action/Version plus the three
-- optional request fields.
instance ToQuery ListVirtualMFADevices where
        toQuery ListVirtualMFADevices'{..}
          = mconcat
              ["Action" =: ("ListVirtualMFADevices" :: ByteString),
               "Version" =: ("2010-05-08" :: ByteString),
               "AssignmentStatus" =: _lvmdAssignmentStatus,
               "Marker" =: _lvmdMarker, "MaxItems" =: _lvmdMaxItems]
-- | Contains the response to a successful ListVirtualMFADevices request.
--
-- /See:/ 'listVirtualMFADevicesResponse' smart constructor.
-- (The device list is never absent: the XML parser above substitutes
-- 'mempty' when the element is missing.)
data ListVirtualMFADevicesResponse = ListVirtualMFADevicesResponse'
    { _lvmdrsMarker :: !(Maybe Text)
    , _lvmdrsIsTruncated :: !(Maybe Bool)
    , _lvmdrsResponseStatus :: !Int
    , _lvmdrsVirtualMFADevices :: ![VirtualMFADevice]
    } deriving (Eq,Read,Show,Data,Typeable,Generic)
-- | Creates a value of 'ListVirtualMFADevicesResponse' with the minimum fields required to make a request.
--
-- Use one of the following lenses to modify other fields as desired:
--
-- * 'lvmdrsMarker'
--
-- * 'lvmdrsIsTruncated'
--
-- * 'lvmdrsResponseStatus'
--
-- * 'lvmdrsVirtualMFADevices'
-- Only the HTTP status is mandatory; the list defaults to empty.
listVirtualMFADevicesResponse
    :: Int -- ^ 'lvmdrsResponseStatus'
    -> ListVirtualMFADevicesResponse
listVirtualMFADevicesResponse pResponseStatus_ =
    ListVirtualMFADevicesResponse'
    { _lvmdrsMarker = Nothing
    , _lvmdrsIsTruncated = Nothing
    , _lvmdrsResponseStatus = pResponseStatus_
    , _lvmdrsVirtualMFADevices = mempty
    }
-- | When 'IsTruncated' is 'true', this element is present and contains the
-- value to use for the 'Marker' parameter in a subsequent pagination
-- request.
lvmdrsMarker :: Lens' ListVirtualMFADevicesResponse (Maybe Text)
lvmdrsMarker = lens _lvmdrsMarker (\ s a -> s{_lvmdrsMarker = a});
-- | A flag that indicates whether there are more items to return. If your
-- results were truncated, you can make a subsequent pagination request
-- using the 'Marker' request parameter to retrieve more items.
lvmdrsIsTruncated :: Lens' ListVirtualMFADevicesResponse (Maybe Bool)
lvmdrsIsTruncated = lens _lvmdrsIsTruncated (\ s a -> s{_lvmdrsIsTruncated = a});
-- | The response status code.
lvmdrsResponseStatus :: Lens' ListVirtualMFADevicesResponse Int
lvmdrsResponseStatus = lens _lvmdrsResponseStatus (\ s a -> s{_lvmdrsResponseStatus = a});
-- | The list of virtual MFA devices in the current account that match the
-- 'AssignmentStatus' value that was passed in the request.
-- ('_Coerce' converts between the coercible list representations.)
lvmdrsVirtualMFADevices :: Lens' ListVirtualMFADevicesResponse [VirtualMFADevice]
lvmdrsVirtualMFADevices = lens _lvmdrsVirtualMFADevices (\ s a -> s{_lvmdrsVirtualMFADevices = a}) . _Coerce;
| fmapfmapfmap/amazonka | amazonka-iam/gen/Network/AWS/IAM/ListVirtualMFADevices.hs | mpl-2.0 | 7,418 | 0 | 14 | 1,516 | 993 | 587 | 406 | 112 | 1 |
-- Problem 1
-- Find the last element of a list.
-- | Problem 1: last element of a list; errors on the empty list.
last' list =
  case list of
    []       -> error "Empty list has no last element."
    [final]  -> final
    (_ : ys) -> last' ys
-- Problem 2
-- Find the last but one element of a list.
-- | Problem 2: second-to-last element; errors on lists shorter than 2.
butLast' list =
  case list of
    []       -> error "Empty list has has no but last element."
    [_]      -> error "List with one element has no but last element."
    [y, _]   -> y
    (_ : ys) -> butLast' ys
-- Problem 3
-- Find the kth element of a list
-- | Problem 3: 1-based indexing; errors past the end or for k < 1.
(!!!) list n =
  case list of
    [] -> error "Out of bounds error"
    (item : rest)
      | n <= 0    -> error "Index must be 1 or greater"
      | n == 1    -> item
      | otherwise -> rest !!! (n - 1)
-- Problem 4
-- Find the number of elements in a list
-- | Problem 4: list length via a right fold.
length' list = foldr (\_ acc -> 1 + acc) 0 list
-- Problem 5
-- Reverse a list
-- | Problem 5: naive quadratic reversal.
reverse' list =
  case list of
    []       -> []
    (y : ys) -> reverse' ys ++ [y]
-- Problem 6
-- Find out whether a list is a palindrome
-- | Problem 6: a list is a palindrome iff it equals its own reversal.
isPalindrome' l = reverse' l == l
-- Problem 7
-- Flatten a nested list structure
-- | Problem 7: a rose-tree-like nesting of lists and elements.
data NestedList a = Elem a | List [NestedList a]
-- | Collapse a 'NestedList' into a flat list, left to right.
flatten :: (NestedList a) -> [a]
flatten (Elem x) = [x]
flatten (List items) = concatMap flatten items
-- Problem 8
-- Eliminate consecutive duplicates of list elements.
-- If a list contains repeated elements they should be replaced with a single
-- copy of the element. The order of the elements should not be changed.
-- | Problem 8: collapse runs of equal adjacent elements to a single
-- occurrence, keeping the first element of each run.
compress list =
  case list of
    (a : b : bs)
      | a == b    -> compress (a : bs)
      | otherwise -> a : compress (b : bs)
    short -> short
-- Problem 9
-- Pack consecutive duplicates of list elements into sublists.
-- If a list contains repeated elements they should be placed into seperate
-- sublists.
-- | Problem 9: group runs of equal adjacent elements into sublists.
-- Note: the empty list packs to @[[]]@ (one empty group), matching the
-- behaviour 'encode' relies on.
pack [] = [[]]
pack [only] = [[only]]
pack (a : rest) =
  case pack rest of
    grouped@(g : gs)
      | a == head g -> (a : g) : gs
      | otherwise   -> [a] : grouped
-- Problem 10
-- Run-length encoding of a list. Use the result of problem P09 to implement
-- the so-called run-length encoding data compression method. Consecutive
-- duplicates of elements are encoded as lists (N E) where N is the number
-- of duplicates of the element E.
encode [] = []
encode list = [(length' sub, head sub) | sub <- pack list] | porglezomp/learn-languages | haskell/99.hs | unlicense | 2,234 | 0 | 11 | 540 | 751 | 396 | 355 | 37 | 2 |
import System.IO
-- | Read three characters from stdin, discard the middle one, and
-- return the outer two as a pair.
getChar' :: IO (Char, Char)
getChar' = do
  first <- getChar
  _ <- getChar
  second <- getChar
  return (first, second)
-- | Read characters until a newline; the newline is consumed but not
-- included in the result.
getLine' :: IO String
getLine' = do
  c <- getChar
  case c of
    '\n' -> return []
    _ -> do
      rest <- getLine'
      return (c : rest)
-- | Write a string to stdout one character at a time.
putStr' :: String -> IO()
putStr' = mapM_ putChar
-- | Character-by-character output again, written with explicit
-- recursion in do-notation.
putStr'' :: String -> IO()
putStr'' [] = return ()
putStr'' (c : cs) = do
  putChar c
  putStr'' cs
-- | 'putStr' followed by a terminating newline.
putStrLn' :: String -> IO ()
putStrLn' text = putStr text >> putChar '\n'
-- | Print a string and a newline, delegating to the hand-rolled
-- 'putStr'' for the body.
putStrLn'' :: String -> IO ()
putStrLn'' [] = putChar '\n'
putStrLn'' text = do
  putStr' text
  putStrLn ""
--putStrLn'' xs = putStr' xs >>= \ x -> putChar '\n'
--putStrLn'' xs = putStr' xs >> putChar' '\n'
--putStrLn'' xs = putStr' xs >> putStr' "\n"
-- | Run every action in the list from left to right, discarding all of
-- their results.
sequence' :: Monad m => [m a] -> m ()
sequence' [] = return ()
sequence' (act : acts) = do
  _ <- foldl (>>) act acts
  return ()
-- Folds with (>>) from 'return ()', so this variant only accepts
-- actions of type @m ()@.
sequence'' ms = foldl (>>) (return ()) ms
-- NOTE(review): the name looks like a typo for sequence'', and the
-- definition appears ill-typed: foldr (>>=) needs the accumulator to
-- be a *function* (the second argument of >>=), not the action
-- 'return ()'.  Confirm whether this file still compiles.
sequenc'' ms = foldr (>>=) (return ()) ms
-- Explicitly recursive variant: run the head, then the rest.
sequence''' [] = return ()
sequence''' (m:ms) = m >> sequence''' ms
-- Same idea via (>>=) with a discarded binder.
-- NOTE(review): the recursive call targets sequence''' (three primes),
-- not sequence'''' -- possibly intentional reuse, possibly a typo.
sequence'''' [] = return ()
sequence'''' (m:ms) = m >>= \ _ -> sequence''' ms
-- | Prompt for a line on stdin and report its length.
strLen :: IO ()
strLen = do
  putStr "Enter any string: "
  line <- getLine
  putStr "Entered String had "
  putStr (show (length line))
  putStrLn' " characters."
---------- Hangman program : guess word
-- | Read one character from stdin without echoing it to the terminal;
-- echo is restored afterwards.
getCh :: IO Char
getCh = do
  hSetEcho stdin False
  typed <- getChar
  hSetEcho stdin True
  return typed
-- | Read a line without echo, printing a dash for each character typed;
-- the terminating newline is echoed as-is.
sgetLine :: IO String
sgetLine = do
  typed <- getCh
  if typed == '\n'
    then do
      putChar typed
      return []
    else do
      putChar '-'
      rest <- sgetLine
      return (typed : rest)
-- | Repeatedly ask the player for the secret word until they get it,
-- after each miss showing (via 'diff') which letters of the secret
-- occur in the guess.
guess :: String -> IO ()
guess word = do
  putStr "> Can You Guess My Word ?\n"
  attempt <- getLine
  if attempt == word
    then putStrLn "You Got It ! :)"
    else do
      putStrLn (diff word attempt)
      guess word
diff :: String -> String -> String
diff xs ys = [if elem x ys then x else '-' | x <- xs]
| dongarerahul/edx-haskell | chapter-8.hs | apache-2.0 | 2,223 | 1 | 12 | 825 | 800 | 383 | 417 | 62 | 2 |
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE DeriveGeneric #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module Kubernetes.V1.ResourceQuotaStatus where
import GHC.Generics
import Kubernetes.Any
import qualified Data.Aeson
-- | ResourceQuotaStatus defines the enforced hard limits and observed use.
-- Both fields are optional in the wire format, hence 'Maybe'.
-- NOTE(review): 'Any' is an untyped JSON value here -- presumably a map
-- of resource name to quantity; confirm against the k8s API schema.
data ResourceQuotaStatus = ResourceQuotaStatus
  { hard :: Maybe Any -- ^ Hard is the set of enforced hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
  , used :: Maybe Any -- ^ Used is the current observed total usage of the resource in the namespace.
  } deriving (Show, Eq, Generic)
-- JSON codecs via the derived 'Generic' instance: aeson's default
-- generic implementations use the record field names as object keys.
instance Data.Aeson.FromJSON ResourceQuotaStatus
instance Data.Aeson.ToJSON ResourceQuotaStatus
| minhdoboi/deprecated-openshift-haskell-api | kubernetes/lib/Kubernetes/V1/ResourceQuotaStatus.hs | apache-2.0 | 868 | 0 | 9 | 116 | 93 | 56 | 37 | 15 | 0 |
{-# LANGUAGE CPP #-}
module CLaSH.GHC.Compat.GHC
( defaultErrorHandler
)
where
import qualified DynFlags
import qualified Exception
import qualified GHC
import qualified MonadUtils
-- | Compatibility wrapper around 'GHC.defaultErrorHandler', papering
-- over the GHC 7.6 API change: from 7.6 on the handler takes a
-- FatalMessager/FlushOut pair, before that a single LogAction.
defaultErrorHandler ::
  (Exception.ExceptionMonad m, MonadUtils.MonadIO m)
  => m a
  -> m a
#if __GLASGOW_HASKELL__ >= 706
defaultErrorHandler = GHC.defaultErrorHandler DynFlags.defaultFatalMessager DynFlags.defaultFlushOut
#else
defaultErrorHandler = GHC.defaultErrorHandler DynFlags.defaultLogAction
#endif
| christiaanb/clash-compiler | clash-ghc/src-ghc/CLaSH/GHC/Compat/GHC.hs | bsd-2-clause | 498 | 0 | 7 | 61 | 83 | 50 | 33 | 12 | 1 |
module Development.Cake3.Ext.UrEmbed.Types where
import Data.Char
import System.FilePath
import Text.Printf
-- | Parsed command-line arguments for the UrEmbed tool.
-- NOTE(review): field meanings below are inferred from the names and
-- from their uses in this module ('out_urs' feeds 'cblobfun'/'ctextfun',
-- 'mangle_css_url' pairs with 'css_mangle_flag'); confirm against the
-- option parser, which is not visible in this file.
data Args = A
  { bver :: Bool
    -- ^ presumably: print version and exit
  , out_c :: FilePath
    -- ^ output path for the generated C source
  , out_h :: FilePath
    -- ^ output path for the generated C header
  , out_urs :: FilePath
    -- ^ output path for the generated Ur/Web signature file (.urs)
  , out_wrapper :: FilePath
  , out_ffi_js :: FilePath
  , mangle_css_url :: Bool
    -- ^ whether URLs inside CSS should be mangled
  , inp :: FilePath
    -- ^ input file to embed
  }
-- out_ffi_js a = wrap (out_ffi_js_lib a) where
-- wrap [] = []
-- wrap p = (dropExtension p) ++ "_js.urs"
-- | Derive the module name for an arbitrary file by pretending it has a
-- @.urs@ extension and delegating to 'uwModName'.
guessModName :: FilePath -> String
guessModName path = uwModName (path ++ ".urs")
-- | Derive an Ur/Web module name from a @.urs@/@.ur@ file path: take
-- the base name, replace separator-ish characters with underscores,
-- reject names starting with a digit, and capitalize the first letter.
uwModName :: FilePath -> String
uwModName path = capitalize (rejectLeadingDigit (map underscore base))
  where
    base = takeFileName (dropExtension (validated path))
    validated x
      | takeExtension x == ".urs" = x
      | takeExtension x == ".ur"  = x
      | otherwise = error $ "uwModName: FILE.urs expected (got " ++ x ++ ")"
    underscore c
      | c `elem` "_-. /" = '_'
      | otherwise        = c
    capitalize []       = []
    capitalize (c : cs) = toUpper c : cs
    rejectLeadingDigit [] = error $ "uwModName: Empty name"
    rejectLeadingDigit n@(c : _)
      | isDigit c = error $ "uwModName: Names starting from digit is not allowed (got " ++ n ++ ")"
      | otherwise = n
-- Names of the accessor functions exposed on the Ur side.
urblobfun = "blob"
urtextfun = "text"
-- Matching C symbol names, built as @uw_<ModuleName>_<function>@ from
-- the module name derived from the output .urs path.
cblobfun a = printf "uw_%s_%s" (uwModName (out_urs a)) urblobfun
ctextfun a = printf "uw_%s_%s" (uwModName (out_urs a)) urtextfun
-- | URLs are represented as plain strings.
type Url = String
-- Name of the command-line flag controlling CSS URL mangling
-- (cf. the 'mangle_css_url' field of 'Args').
css_mangle_flag = "css-mangle-urls"
| grwlf/cake3 | src/Development/Cake3/Ext/UrEmbed/Types.hs | bsd-3-clause | 1,331 | 0 | 12 | 314 | 412 | 221 | 191 | 33 | 3 |
module Chess.FEN ( fromFEN
, toFEN
, defaultFEN
, defaultBoard
) where
import Chess
import qualified Data.List as L
import Data.Char
import Data.Array
import Data.Maybe
-- | Split a string on a delimiter character.  The delimiter is dropped;
-- adjacent delimiters and an empty input produce empty fields, so the
-- result always has (number of delimiters + 1) entries.
split [] _ = [""]
split (c:cs) delim
  | c == delim = "" : restParts
  | otherwise  = (c : firstPart) : otherParts
  where
    restParts@(firstPart:otherParts) = split cs delim
-- | Join string pieces with a delimiter string (inverse of 'split' for
-- single-character delimiters).
unsplit [] _ = ""
unsplit [part] _ = part
unsplit (part:parts) delim = part ++ delim ++ unsplit parts delim
-- | Parse a FEN string into a 'Board'.  Only the first four fields
-- (piece placement, side to move, castling rights, en-passant square)
-- are consulted; any move counters after them are ignored.  Fewer than
-- four fields yields 'Nothing'.
fromFEN :: String -> Maybe Board
fromFEN fen = readPosition $ words fen
  where readPosition (pieces:turn:castle:enpassant:_) =
          Just $ Board (clr turn) castle enpas board where
            clr x = if x == "w" then White else Black
            enpas = if enpassant == "-" then Nothing else Just $ strToPos enpassant
            -- FEN lists ranks from Black's side first; 'reverse' plus
            -- 'transpose' lay the squares out for the (file, rank) array.
            board = listArray ((0,0),(7,7)) (concat $ L.transpose $ map makeLine (reverse boardLines))
            boardLines = split pieces '/'
            makeLine ls = foldr ((++) . pcs) [] ls
            -- A digit expands to that many empty squares; any other
            -- character is parsed as a piece.
            -- NOTE(review): 'read [a]' is partial -- malformed piece
            -- letters will crash rather than return Nothing; confirm
            -- whether inputs are pre-validated.
            pcs a = if isDigit a then replicate (digitToInt a) Nothing else [Just (read [a])]
        readPosition _ = Nothing
-- | Render a 'Board' back into FEN (the same four fields 'fromFEN'
-- consumes; no halfmove/fullmove counters are emitted).
toFEN :: Board -> String
toFEN brd = pieces ++ " " ++ turnstr ++ " " ++ castString ++ " " ++ enpassantstr where
  -- Ranks are emitted from Black's side down, hence the (j, 7-i) index.
  pieces = unsplit (map fenline $ [ [ (board brd)!(j,7-i) | j<-[0..7]] | i<-[0..7]]) "/"
  turnstr = if turn brd == White then "w" else "b"
  -- En-passant square rendered as file letter + 1-based rank digit.
  enpassantstr = fromMaybe "-" (enpassant brd >>= \(x,y) -> return [chr (x+97), intToDigit (y+1)])
  castString = if castlingAvail brd == "" then "-" else castlingAvail brd
  -- 'com' folds runs of empty squares into 'Left' counters; 'tos'
  -- renders a counter or a piece via their Show instances.
  fenline pcs = concatMap tos $ foldr com [] pcs where
    tos = either show show
    com a b = case a of
      Just x -> (Right x) : b
      Nothing -> case b of
        ((Left n):xs) -> (Left (n+1)) : xs
        _ -> (Left 1) : b
-- | FEN of the standard chess starting position (White to move, full
-- castling rights, no en-passant square, no move counters).
defaultFEN :: String
defaultFEN = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq -"
defaultBoard :: Board
defaultBoard = fromJust $ fromFEN defaultFEN | ArnoVanLumig/chesshs | Chess/FEN.hs | bsd-3-clause | 1,970 | 2 | 19 | 555 | 834 | 438 | 396 | 45 | 5 |
{-# LANGUAGE DeriveAnyClass #-}
{-# LANGUAGE ScopedTypeVariables #-}
{-# LANGUAGE DataKinds #-}
{-# LANGUAGE LambdaCase #-}
{-# LANGUAGE TypeOperators #-}
{-# LANGUAGE BangPatterns #-}
-- | Property
module Haskus.System.Linux.Graphics.Property
( PropertyMeta (..)
, PropertyType (..)
, RawProperty (..)
, Property (..)
, InvalidProperty (..)
, getPropertyMeta
, PropValue
, ObjectID
, PropID
, PropertyMetaID
-- * Atomic properties
, setAtomic
, AtomicErrors
)
where
import Haskus.Utils.Flow
import Haskus.System.Linux.Handle
import Haskus.System.Linux.Internals.Graphics
import Haskus.System.Linux.Error
import Haskus.System.Linux.ErrorCode
import Haskus.Format.Binary.Word
import Haskus.Format.Binary.Ptr
import Haskus.Format.Binary.Buffer
import Haskus.Format.Binary.Storable
import Haskus.Format.String
import Data.Map as Map
-- | Property meta-information
-- (filled in by 'getPropertyMeta' below from the kernel's answer)
data PropertyMeta = PropertyMeta
   { propertyID :: Word32 -- ^ ID of the property type
   , propertyImmutable :: Bool -- ^ The value won't change
   , propertyPending :: Bool -- ^ The value is pending
   , propertyName :: String -- ^ Property name
   , propertyType :: PropertyType -- ^ Type of the property
   } deriving (Show,Eq)
-- | The type of a property
-- (decoded from the kernel structures by 'getValues' in
-- 'getPropertyMeta' below)
data PropertyType
   = PropRange [Word64] -- ^ A range
   | PropSignedRange [Int64] -- ^ A signed range
   | PropEnum [(Word64,String)] -- ^ Value-enum
   | PropBitmask [(Word64,String)] -- ^ Bit-enum (bitmask)
   | PropBlob [(Word32,Buffer)] -- ^ Blob-enum
   | PropObject
   deriving (Show,Eq)
-- | A property as read from the card: meta-info ID plus raw 64-bit value.
data RawProperty = RawProperty
   { rawPropertyMetaID :: Word32 -- ^ Card-wise property meta-info ID
   , rawPropertyValue :: Word64 -- ^ Property value
   } deriving (Show,Eq)
-- | A property value together with its resolved meta-information.
data Property = Property
   { propertyMeta :: PropertyMeta -- ^ Meta-information about the property
   , propertyValue :: Word64 -- ^ Value of the property
   } deriving (Show,Eq)
-- Error raised when the kernel reports an unknown property (ENOENT).
data InvalidProperty = InvalidProperty deriving (Show,Eq)
-- Synonyms that make the signatures below self-describing.
type PropertyMetaID = Word32
type ObjectID = Word32
type PropID = Word32
type PropValue = Word64
-- Errors that 'setAtomic' can raise.
type AtomicErrors = '[InvalidHandle,InvalidParam,MemoryError,InvalidRange,EntryNotFound]
-- | Return meta-information from a property type ID
-- The kernel is queried twice: once to learn the element counts, then
-- (inside 'getValues') again with correctly sized buffers.
getPropertyMeta :: forall m. MonadInIO m => Handle -> PropertyMetaID -> FlowT '[InvalidParam,InvalidProperty] m PropertyMeta
getPropertyMeta fd pid = do
   -- get value size/number of elements/etc.
   g <- getProperty' gp
   getValues (gpsCountValues g) (gpsCountEnum g) (getPropertyTypeType g)
      ||> PropertyMeta pid (isImmutable g) (isPending g) (fromCStringBuffer (gpsName g))
   where
      -- Raw ioctl wrapper, mapping errno to the typed errors above.
      getProperty' :: StructGetProperty -> FlowT '[InvalidParam,InvalidProperty] m StructGetProperty
      getProperty' r = ioctlGetProperty r fd
            `catchLiftLeft` \case
               EINVAL -> throwE InvalidParam
               ENOENT -> throwE InvalidProperty
               e -> unhdlErr "getPropertyMeta" e
      -- Initial query with null buffers: only the counts are filled in.
      gp = StructGetProperty
            { gpsValuesPtr = 0
            , gpsEnumBlobPtr = 0
            , gpsPropId = pid
            , gpsFlags = 0
            , gpsName = emptyCStringBuffer
            , gpsCountValues = 0
            , gpsCountEnum = 0
            }
      -- Zero-sized allocation short-circuits to the null pointer.
      allocaArray' 0 f = f nullPtr
      allocaArray' n f = allocaArray (fromIntegral n) f
      getBlobStruct :: StructGetBlob -> FlowT '[InvalidParam,InvalidProperty] m StructGetBlob
      getBlobStruct r = ioctlGetBlob r fd
            `catchLiftLeft` \case
               EINVAL -> throwE InvalidParam
               ENOENT -> throwE InvalidProperty
               e -> unhdlErr "getBlobStruct" e
      -- | Get a blob
      -- (first fetch its length, then fetch the payload into a fresh
      -- buffer; the buffer is freed if the second fetch fails)
      getBlob :: Word32 -> FlowT '[InvalidParam,InvalidProperty] m Buffer
      getBlob bid = do
         let gb = StructGetBlob
                  { gbBlobId = bid
                  , gbLength = 0
                  , gbData = 0
                  }
         gb' <- getBlobStruct gb
         ptr <- mallocBytes . fromIntegral . gbLength $ gb'
         void (getBlobStruct (gb' { gbData = fromIntegral (ptrToWordPtr ptr) }))
            -- free ptr on error
            `onFlowError_` free ptr
         -- otherwise return a bytestring
         bufferPackPtr (fromIntegral (gbLength gb')) ptr
      -- Re-run the property query with caller-sized value/blob buffers
      -- and hand both pointers to the continuation.
      withBuffers :: (Storable a, Storable b) => Word32 -> Word32 -> (Ptr a -> Ptr b -> FlowT '[InvalidParam,InvalidProperty] m c) -> FlowT '[InvalidParam,InvalidProperty] m c
      withBuffers valueCount blobCount f =
         liftWith (allocaArray' valueCount) $ \valuePtr ->
            liftWith (allocaArray' blobCount) $ \blobPtr -> do
               let gp' = StructGetProperty
                           { gpsValuesPtr = fromIntegral (ptrToWordPtr valuePtr)
                           , gpsEnumBlobPtr = fromIntegral (ptrToWordPtr blobPtr)
                           , gpsPropId = pid
                           , gpsFlags = 0
                           , gpsName = emptyCStringBuffer
                           , gpsCountValues = valueCount
                           , gpsCountEnum = blobCount
                           }
               -- nothing changes, except for the two buffers
               _ <- getProperty' gp'
               f valuePtr blobPtr
      withValueBuffer :: Storable a => Word32 -> ([a] -> FlowT '[InvalidParam,InvalidProperty] m c) -> FlowT '[InvalidParam,InvalidProperty] m c
      withValueBuffer n f = withBuffers n 0 $ \ptr (_ :: Ptr Word) ->
         f =<< peekArray (fromIntegral n) ptr
      withBlobBuffer n f = withBuffers 0 n $ \(_ :: Ptr Word) ptr ->
         f =<< peekArray (fromIntegral n) ptr
      withBuffers' n m f = withBuffers n m $ \p1 p2 -> do
         vs <- peekArray (fromIntegral n) p1
         bs <- peekArray (fromIntegral m) p2
         f vs bs
      -- Decode the payload according to the property's kind.
      getValues :: Word32 -> Word32 -> PropertyTypeType -> FlowT '[InvalidParam,InvalidProperty] m PropertyType
      getValues nval nblob ttype = case ttype of
         PropTypeObject -> return PropObject
         PropTypeRange -> withValueBuffer nval (return . PropRange)
         PropTypeSignedRange -> withValueBuffer nval (return . PropSignedRange)
         PropTypeEnum -> withBlobBuffer nblob $ \es ->
            return (PropEnum [(peValue e, fromCStringBuffer $ peName e) | e <- es])
         PropTypeBitmask -> withBlobBuffer nblob $ \es ->
            return (PropBitmask [(peValue e, fromCStringBuffer $ peName e) | e <- es])
         PropTypeBlob -> withBuffers' nblob nblob $ \ids bids -> do
            traverse getBlob bids
               ||> (PropBlob . (ids `zip`))
-- | Set object properties atomically
-- The per-object property lists are flattened into four parallel
-- arrays (objects, per-object property counts, property IDs, values),
-- which is the layout the atomic ioctl expects.
setAtomic :: MonadInIO m => Handle -> AtomicFlags -> Map ObjectID [(PropID,PropValue)] -> FlowT AtomicErrors m ()
setAtomic hdl flags objProps = do
   let
      kvs = Map.assocs objProps -- [(Obj,[(Prop,Val)])]
      objs = fmap fst kvs -- [Obj]
      pvs = fmap snd kvs -- [[(Prop,Val)]]
      nprops = fmap length pvs
      props = fmap fst (concat pvs) -- [Prop]
      vals = fmap snd (concat pvs) -- [Val]
   withArray objs $ \pobjs ->
      withArray nprops $ \pnprops ->
         withArray props $ \pprops ->
            withArray vals $ \pvals -> do
               let
                  toPtr = fromIntegral . ptrToWordPtr
                  s = StructAtomic
                     { atomFlags = flags
                     , atomCountObjects = fromIntegral (length (Map.keys objProps))
                     , atomObjectsPtr = toPtr pobjs
                     , atomCountPropsPtr = toPtr pnprops
                     , atomPropsPtr = toPtr pprops
                     , atomPropValuesPtr = toPtr pvals
                     , atomReserved = 0 -- must be zero
                     , atomUserData = 0 -- used for event generation
                     }
               -- Errno mapping; note both ERANGE and ENOSPC are
               -- surfaced as InvalidRange.
               void (ioctlAtomic s hdl)
                  `catchLiftLeft` \case
                     EBADF -> throwE InvalidHandle
                     EINVAL -> throwE InvalidParam
                     ENOMEM -> throwE MemoryError
                     ENOENT -> throwE EntryNotFound
                     ERANGE -> throwE InvalidRange
                     ENOSPC -> throwE InvalidRange
                     e -> unhdlErr "setAtomic" e
| hsyl20/ViperVM | haskus-system/src/lib/Haskus/System/Linux/Graphics/Property.hs | bsd-3-clause | 8,582 | 0 | 27 | 2,944 | 2,013 | 1,097 | 916 | 165 | 11 |
{-# LANGUAGE OverloadedStrings #-}
module Main where
import Control.Concurrent (forkIO)
import Control.Exception (try, SomeException(..))
import Control.Monad (void, when)
import qualified Data.ByteString.Char8 as BS
import Network.DNS
import Network.DNS.Cache as DNSC
import Data.Time
-- | Resolver configurations: Google's public DNS servers.
confs :: [ResolvConf]
confs = [
    defaultResolvConf { resolvInfo = RCHostName "8.8.8.8" }
  , defaultResolvConf { resolvInfo = RCHostName "8.8.4.4" }
  ]
-- | Upper bound on in-flight lookups (enforced via 'wait' in 'main').
maxCon :: Int
maxCon = 50
-- | Cache configuration; TTL values are in seconds.
cacheConf :: DNSCacheConf
cacheConf = DNSCacheConf {
    resolvConfs = confs
  , maxConcurrency = maxCon
  , minTTL = 60
  , maxTTL = 300
  , negativeTTL = 300
  }
-- | Read domain names from stdin, one per line, and resolve each one
-- concurrently through the DNS cache. Every 1000 lines the elapsed time
-- is printed; on EOF (read failure) all pending lookups are drained.
main :: IO ()
main = do
    beg <- getCurrentTime
    withDNSCache cacheConf (loop 1 beg)
  where
    loop :: Int -> UTCTime -> DNSCache -> IO ()
    loop n beg cache = do
        when (n `mod` 1000 == 0) $ do
            cur <- getCurrentTime
            putStrLn $ show n ++ ": " ++ show (cur `diffUTCTime` beg)
        edom <- try BS.getLine
        case edom of
          -- EOF (or any read error): wait until no lookup is in flight
          Left (SomeException _) -> do
              wait cache (== 0)
              putStrLn "Done."
          Right dom -> do
              -- block until we are below the concurrency limit
              wait cache (< maxCon)
              void $ forkIO (DNSC.resolve cache dom >>= p dom)
              loop (n+1) beg cache
    -- report failed lookups together with their domain
    p _ (Right _) = return ()
    p dom (Left e) = do
        putStr $ show e ++ " "
        BS.putStrLn dom
| kazu-yamamoto/concurrent-dns-cache | test/main.hs | bsd-3-clause | 1,396 | 0 | 19 | 431 | 475 | 251 | 224 | 44 | 3 |
--------------------------------------------------------------------------------
module Crypto
( Pass
, EncryptedPass
, toPass
, encryptPass
, verifyPass
)
where
import Control.Applicative ((<$>))
import Data.Text (Text)
import qualified Crypto.Scrypt as C
import qualified Data.Text.Encoding as T
import qualified Database.PostgreSQL.Simple.FromField as P
import qualified Database.PostgreSQL.Simple.ToField as P
--------------------------------------------------------------------------------
-- | A plaintext password; never persisted.
newtype Pass = Pass C.Pass
-- | A scrypt-hashed password, safe to store in the database.
newtype EncryptedPass = EncryptedPass C.EncryptedPass
  deriving (Eq, Show)
-- Stored in PostgreSQL as the raw scrypt output bytes.
instance P.FromField EncryptedPass where
  fromField f dat =
    EncryptedPass . C.EncryptedPass <$> P.fromField f dat
instance P.ToField EncryptedPass where
  toField (EncryptedPass (C.EncryptedPass p)) =
    P.toField p
--------------------------------------------------------------------------------
-- | Wrap a plaintext password, UTF-8 encoding it for scrypt.
toPass :: Text -> Pass
toPass = Pass . C.Pass . T.encodeUtf8
-- | Hash a plaintext password with scrypt; IO because a fresh random salt
-- is generated per call.
encryptPass :: Pass -> IO EncryptedPass
encryptPass (Pass pass) =
  EncryptedPass <$> C.encryptPassIO' pass
-- | Check a plaintext password against a stored scrypt hash.
verifyPass :: Pass -> EncryptedPass -> Bool
verifyPass (Pass pass) (EncryptedPass encpass) =
  C.verifyPass' pass encpass
--------------------------------------------------------------------------------
| mietek/untitled-wai | src/Crypto.hs | bsd-3-clause | 1,334 | 0 | 11 | 194 | 302 | 171 | 131 | 30 | 1 |
module QuadTree
(
QuadTree(..),
AABB(..),
HasAABB(..),
subQuadrants,
intersectAABB,
empty,
insert,
remove,
move,
toList,
queryIntersecting
) where
import Data.List hiding (insert)
import Control.Exception
-- | AABB with center and half dimension
data AABB = AABB (Float,Float) (Float,Float)
          deriving (Eq, Ord, Show)
-- | Things that carry an axis-aligned bounding box.
class HasAABB a where
    getAABB :: a -> AABB
instance HasAABB AABB where
    getAABB = id
-- | Do two axis-aligned boxes overlap? Touching edges count as overlap.
intersectAABB :: AABB -> AABB -> Bool
intersectAABB (AABB (c1x,c1y) (h1x,h1y)) (AABB (c2x,c2y) (h2x,h2y)) =
    overlaps (c1x-h1x) (c1x+h1x) (c2x-h2x) (c2x+h2x)
    && overlaps (c1y-h1y) (c1y+h1y) (c2y-h2y) (c2y+h2y)
  where
    -- closed intervals [lo1,hi1] and [lo2,hi2] overlap iff neither lies
    -- strictly to one side of the other
    overlaps lo1 hi1 lo2 hi2 = lo2 <= hi1 && lo1 <= hi2
-- | Labels for a node's four child quadrants.
data Quadrant = NW | NE | SW | SE
-- | QuadTree data structure containing AABBs
data QuadTree a = Leaf
                | Node AABB [a] (QuadTree a) (QuadTree a)
                  (QuadTree a) (QuadTree a)
                deriving (Eq, Ord, Show)
-- | Split an AABB into its four equal sub-quadrants (NW, NE, SW, SE order).
subQuadrants :: AABB -> (AABB,AABB,AABB,AABB)
subQuadrants (AABB (cx,cy) (hx,hy)) =
    ( quadrantAt (cx - qx) (cy - qy)
    , quadrantAt (cx + qx) (cy - qy)
    , quadrantAt (cx - qx) (cy + qy)
    , quadrantAt (cx + qx) (cy + qy) )
  where
    qx = hx / 2
    qy = hy / 2
    -- each sub-quadrant has half the parent's half-dimensions
    quadrantAt x y = AABB (x,y) (qx,qy)
-- | Build an empty tree of the given height covering the given region.
-- A non-positive height yields a bare 'Leaf'.
empty :: AABB -> Int -> QuadTree a
empty worldSize height =
    if height <= 0
      then Leaf
      else let (nw,ne,sw,se) = subQuadrants worldSize
               subtree q     = empty q (height - 1)
           in Node worldSize [] (subtree nw) (subtree ne) (subtree sw) (subtree se)
-- | The children of a node whose quadrant overlaps the given AABB.
-- Leaf children never match the pattern and are silently skipped.
intersectingChildren :: AABB -> QuadTree a -> [QuadTree a]
intersectingChildren xAABB (Node _ _ nw ne sw se) =
    [ child
    | child@(Node childAABB _ _ _ _ _) <- [nw,ne,sw,se]
    , intersectAABB childAABB xAABB ]
-- | The quadrant labels of a node whose regions overlap the given AABB.
intersectingQuadrants :: AABB -> QuadTree a -> [Quadrant]
intersectingQuadrants aabb (Node _ _ nw ne sw se) =
    [ label
    | (Node childAABB _ _ _ _ _, label) <- [(nw,NW),(ne,NE),(sw,SW),(se,SE)]
    , intersectAABB childAABB aabb ]
-- | Insert a value into the tree. The value sinks to the deepest node
-- whose single child quadrant contains it; if it straddles several
-- quadrants it stays at the current node.
--
-- Inserting into a bare 'Leaf' (a height-0 tree) is a programming error:
-- a Leaf carries no AABB to place the value in. Previously this case was
-- a silent non-exhaustive pattern-match failure; it is now an explicit
-- error, consistent with 'remove'.
insert :: HasAABB a => a -> QuadTree a -> QuadTree a
insert _ Leaf = error "cannot insert object into a leaf!"
insert x (Node size xs Leaf _ _ _) =
  Node size (x:xs) Leaf Leaf Leaf Leaf
insert x node@(Node size xs nw ne sw se) =
  let
    xAABB = getAABB x in
  case intersectingQuadrants xAABB node of
    [NW] -> Node size xs (insert x nw) ne sw se
    [NE] -> Node size xs nw (insert x ne) sw se
    [SW] -> Node size xs nw ne (insert x sw) se
    [SE] -> Node size xs nw ne sw (insert x se)
    -- overlaps several quadrants (or none): keep it at this node
    _ -> Node size (x:xs) nw ne sw se
-- | Collect all values stored in nodes whose region intersects the query
-- AABB. This over-approximates: values stored at a node need not
-- themselves intersect (callers filter, see 'queryIntersecting').
--
-- Fix: querying a bare 'Leaf' (height-0 tree) previously crashed with a
-- non-exhaustive pattern match; an empty tree now yields no results.
queryClose :: (HasAABB a) => AABB -> QuadTree a -> [a]
queryClose _ Leaf = []
queryClose _ (Node _ bs Leaf _ _ _) = bs
queryClose aabb node@(Node _ bs _ _ _ _) =
  bs ++ concatMap (queryClose aabb) (intersectingChildren aabb node)
-- | All stored values whose AABB actually intersects the query object's AABB.
queryIntersecting :: (HasAABB a, HasAABB b) => a -> QuadTree b -> [b]
queryIntersecting a qtree = filter hits (queryClose queryBox qtree)
  where
    queryBox = getAABB a
    hits     = intersectAABB queryBox . getAABB
-- | Delete a value from the tree, using eqPred to identify it. Descends
-- into the single quadrant containing the value, mirroring 'insert';
-- if the value straddles quadrants it is deleted from the current node.
remove :: (HasAABB a) => (a -> a -> Bool) -> a -> QuadTree a -> QuadTree a
remove eqPred agent (Node size xs Leaf _ _ _) =
  Node size (deleteBy eqPred agent xs) Leaf Leaf Leaf Leaf
remove eqPred agent node@(Node size xs nw ne sw se) =
  case intersectingQuadrants (getAABB agent) node of
    [NW] -> Node size xs (remove eqPred agent nw) ne sw se
    [NE] -> Node size xs nw (remove eqPred agent ne) sw se
    [SW] -> Node size xs nw ne (remove eqPred agent sw) se
    [SE] -> Node size xs nw ne sw (remove eqPred agent se)
    _ -> Node size (deleteBy eqPred agent xs) nw ne sw se
remove _ _ _ = error "cannot remove object from a leaf!"
-- | Relocate an object: remove the old version, insert the new one.
-- The assertion checks that both versions denote the same object
-- under eqPred (only active in non-optimized builds).
move :: (HasAABB a) => (a -> a -> Bool) -> a -> a -> QuadTree a -> QuadTree a
move eqPred oldAgent newAgent qtree =
  assert (eqPred oldAgent newAgent) reinserted
  where
    reinserted = insert newAgent (remove eqPred oldAgent qtree)
-- | Flatten the tree into a list of all stored values
-- (node values first, then NW, NE, SW, SE subtrees).
toList :: QuadTree a -> [a]
toList Leaf = []
toList (Node _ xs nw ne sw se) = xs ++ concatMap toList [nw, ne, sw, se]
| alexisVallet/haskell-shmup | QuadTree.hs | bsd-3-clause | 4,056 | 0 | 12 | 1,048 | 1,880 | 982 | 898 | 98 | 5 |
----------------------------------------------------------------------------
-- |
-- Module : Language.Core.Interpreter.Evaluable
-- Copyright : (c) Carlos López-Camey, University of Freiburg
-- License : BSD-3
--
-- Maintainer : [email protected]
-- Stability : stable
--
--
-- Contains a type class that describes data types that can be evaluated to a value
-----------------------------------------------------------------------------
{-# LANGUAGE ImplicitParams #-}
{-# LANGUAGE DoAndIfThenElse #-}
{-# LANGUAGE ScopedTypeVariables #-}
module Language.Core.Interpreter.Evaluable where
--------------------------------------------------------------------------------
-- DART
import DART.CaseAnalysis.PredicateBranch
--------------------------------------------------------------------------------
-- System
import Data.Time.Clock
import Data.Time.Clock(getCurrentTime,diffUTCTime)
import DART.CmdLine
import DART.Compiler.JIT(jitCompile)
import DART.Util.StringUtils(separateWithNewLines)
import qualified Data.HashTable.IO as H
import Data.List(find,inits)
--------------------------------------------------------------------------------
-- Language Core
import Language.Core.Interpreter.Acknowledge
import Language.Core.Interpreter.Apply
import Language.Core.Interpreter.CaseAnalysis
import Language.Core.Interpreter.Structures
import Language.Core.Module
import Language.Core.Vdefg (findVdefByName,vdefgNames,vdefName)
-- | Things that can be reduced to a 'Value' inside the interpreter monad,
-- given an environment mapping identifiers to heap addresses.
class Evaluable a where
  eval :: a -> Env -> IM Value
-- Literals are decoded by inspecting the printed External Core type.
instance Evaluable Lit where
  eval l@(Literal coreLit ty) _ = case showExtCoreType ty of
    "Tcon(ghc-prim:GHC.Prim.Int#)" -> let (Lint i) = coreLit in return . Num $ i
    "Tcon(integer-gmp:GHC.Integer.Type.Integer)" -> let (Lint i) = coreLit in return . Num $ i
    "Tcon(ghc-prim:GHC.Prim.Char#)" -> case coreLit of
      (Lchar c) -> return . Char $ c
      (Lint i) -> return . Num $ i
    "Tcon(ghc-prim:GHC.Prim.Addr#)" -> let (Lstring s) = coreLit in return . String $ s
    --"Rational" -> return . Rat $ r
    _ -> return . Wrong $ "Could not evaluate literal of type " ++ showExtCoreType ty
-- | If the user provides an expression to evaluate, it can be either a haskell expression
-- that we can compile just in time, or a function name that is defined in the module
instance Evaluable HaskellExpression where
  eval hs@(HaskellExpression expression_string m@(Module mname tdefs vdefgs)) env =
    -- | Is it a function defined within the module?
    case (m `moduleFindVdefByName` expression_string) of
      Just vdef -> do
        watchReductionM $ "Found a definition of " ++ expression_string
        eval (Nonrec vdef) env
      Nothing -> do
        debugM $ "Did not found any function named " ++ expression_string
        let
          fnames = concatMap vdefgNames vdefgs -- [String]
          fnames_vdefgs = zip fnames vdefgs -- [(String,Vdefg)]
        -- not a known name: try compiling the string just-in-time
        jitMaybeVdef <- jitCompile hs
        case jitMaybeVdef of
          Just vdefg -> eval vdefg env
          Nothing -> return . Wrong $ "Could not evaluate " ++ expression_string ++ " in " ++ show mname
instance Evaluable Id where
  eval id env = evalId id env
-- | Looks for the address in the heap, evals a thunk if necessary to return a value
evalId :: Id -> Env -> IM Value
--evalId i e = lookupId i e >>= either (evalThunk e) return
evalId i e = do
  watchReductionM $ "Evaluating variable " ++ i
  ptr <- getPointer i e
  case ptr of
    e@(Wrong s) -> return e -- i was not found in env
    Pointer (MkPointer heap_address) -> do -- we know something about i in env
      eTnkVal <- lookupMem heap_address
      whnf <- case eTnkVal of -- evaluate to weak head normal form
        Left thunk -> do
          val <- eval thunk e
          h <- gets heap
          -- memoize: replace the thunk with its value in the heap
          io $ H.insert h heap_address (Right val)
          return val
        Right val -> return val -- it is already in weak head normal form
      watchReductionM (show i ++ " ~> " ++ show whnf)
      return whnf
instance Evaluable Vdefg where
  eval vdefg env = do
    refs <- evalVdefg vdefg env
    case refs of
      [] -> return . Wrong $ "The impossible happened"
      [single_ref@(_,address)] -> eval address env
      -- a recursive group: evaluate every definition and label the
      -- results "a","b",... so they can be told apart
      _ -> do
        vals <- mapM (\(_,address) -> eval address env) refs
        let mkList x = [x]
        return . MkListOfValues $ zip (map mkList ['a'..'z']) vals
-- | Given an environment, looks for the address in the heap, evals a thunk using the given environment if necessary to return a value
instance Evaluable HeapAddress where
  eval address env = do
    beVerboseM $ "accessing address " ++ show address
    eTnkVal <- lookupMem address
    -- if it is a thunk, eval and memorize in heap
    either (evalThnk address) return eTnkVal
    where
      -- evaluates a thunk and updates its value in the heap
      evalThnk :: HeapAddress -> Thunk -> IM Value
      evalThnk address thunk = do
        val <- eval thunk env
        h <- gets heap
        watchReductionM $ "Storing value " ++ show val ++ " in " ++ show address
        io $ H.insert h address (Right val)
        return val
instance Evaluable Thunk where
  --eval :: Thunk -> Env -> IM Value
  eval (Thunk exp env) e = do -- TODO. To comment: Why we don't care about the second env?
    ti <- gets tab_indentation
    let ?tab_indentation = ti
    watchReductionM $ "Evaluating thunk: " ++ showExp exp
    -- abort if the per-function time budget has been exceeded
    reached_timeout <- isTimeout
    case reached_timeout of
      False -> eval exp (env ++ e) -- (!) passing (e ++ env) instead produces an infinite loop
      True -> do
        max_secs <- gets settings >>= return . show . max_time_per_function
        clearTimeout
        return . Wrong $ "Reached timeout of " ++ max_secs ++ " seconds"
-- Values are already in normal form except pointers, which are dereferenced.
instance Evaluable Value where
  -- eval :: Value -> Env -> IM Value
  eval e@(Wrong _) _ = return e
  eval (Pointer ptr) env = eval ptr env
  eval v env = return $ Wrong $ "Wrong Evaluable Value: " ++ show v
instance Evaluable Pointer where
  eval (MkPointer address) env = eval address env
-- The heart of the interpreter: reduce an External Core expression to a
-- 'Value'. Each equation handles one syntactic form (application, typed
-- application, lambda, variable, case, let, literal).
instance Evaluable Exp where
  -- Integer,Char construction -- match against Qual for the benefit of speed
  eval (App (Dcon ((Just (M (P ("ghczmprim"),["GHC"],"Types"))),constr)) (Lit lit)) env = case constr of
    "Izh" -> eval lit []
    "Czh" -> eval lit []
    otherwise -> return . Wrong $ " Constructor " ++ constr ++ " is not yet implemented. Please submit a bug report"
  -- f :: a -> b
  -- x :: a
  -- => applied_types = [a,b]
  eval e@(App f_exp arg_exp) env = do
    ti <- gets tab_indentation
    let ?tab_indentation = ti
    f_val <- evalExpI f_exp env ("Reducing lambda " ++ showExp f_exp)
    -- if the argument is already in env, don't make another reference
    case arg_exp of
      (Var qvar) -> do
        arg_val <- zDecodeQualified qvar `lookupId` env
        case arg_val of
          Right (Wrong _) -> mkRefAndApply f_val arg_exp -- not found,
          whnf -> do
            beVerboseM $ "not making a new reference, " ++ (zDecodeQualified qvar) ++ " is already in env"
            mkRefAndApply f_val arg_exp
            --apply f (qualifiedVar qvar) env --don't create thunk for variables in scope
      _ -> mkRefAndApply f_val arg_exp
    where
      -- store the (unevaluated) argument in the heap and apply the
      -- function to the resulting identifier
      mkRefAndApply :: Value -> Exp -> IM Value
      mkRefAndApply f arg_exp = do
        beVerboseM "Making a reference from/to the function argument "
        heap_reference@(id,_) <- mkHeapReference arg_exp env
        watchReductionM $ "Argument saved as " ++ id
        res <- apply f id (heap_reference:env) -- the address is passed
        return res
  -- | Typed function (or data) application
  eval e@(Appt exp typ) env = do
    ti <- gets tab_indentation
    let ?tab_indentation = ti
    case exp of
      (Var qvar) -> evalExpI exp env "Typed Var application "
      (Dcon qvar) -> do
        v <- evalExpI exp env $ "Application of typed data constructor \"" ++ zDecodeQualified qvar ++ "\" with type = " ++ showType typ
        case v of
          -- if data constructor expects a polymorphic type, look for the variable and make a reference, i.e., instance the type variable.
          (TyCon
            tycon@(MkDataCon _
                   datacon_sig@(Tvar(poly_var):_)
                   ty_env)
            tyname) -> do
            watchReductionM $ "Instancing type \"" ++ poly_var ++ "\" to " ++ (showType typ)
            let
              -- if polytype variable is found, give it the type
              mapSigTy :: Ty -> Ty --instance the Tvar to `typ`
              mapSigTy poly_ty@(Tvar poly_var') | poly_var' == poly_var = typ
                                                | otherwise = poly_ty
              mapSigTy sigty = sigty
            let
              isPolytype :: Ty -> Bool
              isPolytype (Tvar _) = True
              isPolytype _ = False
            let
              polytype :: Maybe Ty
              polytype = find isPolytype datacon_sig
              new_type_env :: TypeEnv
              new_type_env = case polytype of
                Just polyvar@(Tvar var_name) -> (var_name,polyvar):ty_env
                _ -> ty_env
            return $ TyCon (tycon { signature = map mapSigTy datacon_sig,
                                    context = new_type_env })
                     tyname
          TyCon tycon@(MkDataCon _ (t:ts) applied_types') tyname -> do
            watchReductionM $ "Applied types: " ++ (show applied_types')
            watchReductionM $ "Creating annotated type constructor from " ++ (show t) ++ " to " ++ (tyname)
            return $ TyCon (tycon { signature = ts}) tyname
          --tyconapp@(TyConApp _ _) -> return tyconapp
          otherwise' -> return $ Wrong $ "The impossible happened: Typed application was expecting a type constructor or an applied type constructor, but got: " ++ show otherwise'
      _ -> evalExpI exp env "Typed application "
  -- ($) :: (a -> b) -> a -> b
  eval (Var ((Just (M (P ("base"),["GHC"],"Base"))),"zd")) env =
    let
      applyFun :: Value -> IM Value
      applyFun (Fun f dsc) = do return $ Fun f ("($) " ++ dsc)
      applyFun x = return . Wrong $ "($), Applying something that is not a function, namely: " ++ show x
      -- takes a function `f` and returns a function `g` that applies `f` to its argument
      ap id e = evalId id e >>= applyFun
    in return $ Fun ap "($) :: (a -> b) -> a -> b"
  -- lambda abstraction over types variables
  -- returns a Fun value
  eval e@(Lam (Vb (var_name,ty)) exp) env = do
    --whenFlag show_subexpressions $ indentExp e >>= \e -> debugM $ "Evaluating subexpression " ++ e
    watchReductionM $ "Making function from lambda abstraction \\" ++ var_name ++ " :: " ++ showType ty ++ " -> exp "
    -- If we abstract over a variable with a free type variable
    -- e.g. \zddNumTmp :: Num a
    -- and we are applying it, we should assume some value before reducing its value
    case exp of
      App _ (Var x) -> if (zDecodeQualified x == showType ty)
                       then return (Fun mkFun $ "\\" ++ var_name ++ " -> exp")
                       else return (Fun mkFun $ "\\" ++ var_name ++ " -> exp")
      exp' -> do
        ti <- gets tab_indentation
        let ?tab_indentation = ti
        return $ Fun mkFun $ "\\" ++ var_name ++ " -> exp"
    where
      -- a function that receives an identifier that points to some address
      -- makes a pointer from the variable we are abstracting over to found the address
      -- and computes a value in a new constructed environment
      mkFun :: Id -> Env -> IM Value
      mkFun id env' = do
        ptr <- getPointer id env'
        case ptr of
          Pointer ptr -> evalExpI exp ((var_name,ptr_address ptr):env) "Evaluating Lambda body (exp)"
          w@(Wrong _) -> return w
  -- lambda abstraction over a type variable
  eval e@(Lam (Tb (var_name,_)) exp) env = do
    whenFlag show_subexpressions $ indentExp e >>= \e -> debugM $ "Evaluating subexpression " ++ e
    watchReductionM $ "Saving type " ++ var_name ++ " as a free type var"
    free_type_ref <- memorize (Right . FreeTypeVariable $ var_name) var_name
    evalExpI exp (free_type_ref:env) "Evaluating lambda body (exp)"
  -- Qualified variables that should be in the environment
  eval e@(Var qvar) env = do
    maybePtr <- mkPointer (zDecodeQualified qvar) env
    case maybePtr of
      Just ptr -> eval ptr env
      Nothing -> return $ Wrong $ "Could not find var in env " ++ zDecodeQualified qvar ++ ".."
  eval (Dcon qcon) env = getPointer (zDecodeQualified qcon) env >>= flip eval env
  -- Case of
  eval (Case case_exp vbind@(vbind_var,ty) gen_ty alts) env = analyzeCase case_exp vbind env alts >>= evalAnalysis
    where
      evalAnalysis :: CaseAnalysis -> IM Value
      evalAnalysis (CaseAnalysisResult exp matched_alt exp_ref exp_value) = do
        case matched_alt of
          -- If a matched alternative was not found, check if an error has happened, and propagate that. If there is no error and there is no matched alternative, there is an unexhaustive pattern matching error
          Nothing -> return $ case exp_value of
            err@(Wrong msg) -> err
            _ -> Wrong $ "Unexhaustive pattern matching of " ++ vbind_var
          -- if a pattern matched against the expression value, we should evaluate the expression body to which that pattern corresponds
          Just matched_pattern -> do
            recordBranch case_exp exp_value env
            -- bind those free variables contained in the matched alternative pattern
            watchReductionM $ "Binding free variables contained in the matched pattern"
            free_vars_env <- mkAltEnv exp_value matched_pattern
            eval (patternExpression matched_pattern) (exp_ref:(free_vars_env ++ env))
  eval (Lit lit) _ = eval lit []
  eval (Let vdefg exp) env = do
    env' <- acknowledgeVdefg vdefg env
    eval exp (env' ++ env)
  -- | Otherwise
  eval otherExp _ = indentExp otherExp >>= \expStr -> return . Wrong $ " TODO: {" ++ expStr ++ "}\nPlease submit a bug report"
-- | Given a case analysis expression, build an analysis data type that contains
-- about the execution of pattern matching
analyzeCase :: Exp -> Vbind -> Env -> [Alt] -> IM CaseAnalysis
analyzeCase case_exp (var_to_bind,_) env alts = do
  watchSMT $ "\tDoing case analysis for " ++ show var_to_bind
  -- bind the exp to the var_to_bind in the heap, it might be used within the scope of the alternative's expression body
  heap_reference@(id,address) <- memorize (mkThunk case_exp env) var_to_bind
  exp_value <- eval address (heap_reference:env)
  matched_alt <- findMatch exp_value alts
  return $ CaseAnalysisResult {
    analysis_expression = case_exp,
    matched_alternative = matched_alt,
    expression_ref = heap_reference,
    expression_value = exp_value
  }
-- | Given an alternative and a value that matches the alternative,
-- binds the free variables in memory and returns them in an environment
mkAltEnv :: Value -> Alt -> IM Env
mkAltEnv v@(Num _) (Acon (_,"Izh") _ [(vbind_var,vbind_ty)] _) =
  do -- bind a single number
    beVerboseM $ "Binding " ++ vbind_var ++ " to val " ++ show v
    heap_ref <- memorize (Right v) vbind_var
    return [heap_ref]
mkAltEnv (Num n) (Acon q tbinds vbinds exp) = (debugM $ "what is this number @mkAltEnv???" ++ show q) >> return []
-- NOTE(review): only errors when there are fewer pointers than binders;
-- surplus pointers are silently dropped by the zip — confirm intended.
mkAltEnv (TyConApp (MkDataCon id _ _) pointers) (Acon _ _ vbinds _) = do
  if (length pointers < length vbinds)
  then do
    let
      evalPtr = flip eval []
      vbindss = show vbinds
      showLength = show . length
    vals <- mapM evalPtr pointers
    error $ "The impossible happened @mkAltEnv, length vals (" ++ (show . length $ pointers) ++ ") /= length vbinds (" ++ (show . length $ vbinds) ++ ") " ++ ", for type constructor" ++ id ++ "\n\tvals: " ++ (show vals) ++ "\n\tvbinds: " ++ vbindss
  else do
    -- get vals and memorize that
    let ids = (map fst vbinds)
    return $ ids `zip` (map ptr_address pointers)
    -- mapM (uncurry memorize) (addresses `zip` (map fst vbinds))
mkAltEnv val alt = do
  watchReductionM $ "Returning empty env (didn't bind anything)"
  return []
-- | Tries to find an alternative that matches a value. It returns the first match, if any.
-- According to [GHC](http://hackage.haskell.org/trac/ghc/wiki/Commentary/Compiler/CoreSynType)
-- the DEFAULT alternative must come first, then constructors, then lits
findMatch :: Value -> [Alt] -> IM (Maybe Alt)
findMatch val [] = return Nothing -- no alt
findMatch val (def_alt@(Adefault exp):alts) =
  do
    -- a default alt, if none of the remaining alternatives match, match always with this alternative
    otherMatch <- findMatch val alts
    case otherMatch of
      found@(Just alt) -> return found
      _ -> return . Just $ def_alt
findMatch val (a:alts) = do
  ti <- gets tab_indentation
  let ?tab_indentation = ti
  watchReductionM $ "Comparing " ++ show val ++ " and " ++ showAlt a
  matches <- val `matches` a
  if (not matches)
  then val `findMatch` alts
  else do
    watchReductionM $ "Found case match for" ++ show val
    return . Just $ a
-- | Decide whether a value matches the pattern of a case alternative.
matches :: Value -> Alt -> IM Bool
-- data
matches (TyConApp (MkDataCon n _ _) _) (Acon qual_dcon _ _ _) = return $ zDecodeQualified qual_dcon == n
matches (TyCon (MkDataCon n _ _) _) (Acon qual_dcon _ _ _) = return $ zDecodeQualified qual_dcon == n
--matches val (Alit lit exp) = return False --TODO
matches val (Adefault _) = return True -- this is the default case, i.e. "_ -> exp "
-- primitives
matches (Num n) (Acon qdcon _ _ _) = do
  watchReductionM $ "Trying to match a Num value (" ++ show n ++ ") with the type constructed by " ++ zDecodeQualified qdcon
  let matches' = zDecodeQualified qdcon == "ghc-prim:GHC.Types.I#"
  watchReductionM $ "\t.. " ++ if matches' then " matches" else " does not match"
  return matches'
-- lits
matches (Rat n) (Alit (Literal (Lrational r) _) exp) | n == r = return True
matches (Char c) (Alit (Literal (Lchar c2) _) exp) | c == c2 = return True
matches (String s) (Alit (Literal (Lstring s2) _) _) | s == s2 = return True
matches (Num n) (Alit (Literal (Lint i) _) exp) | n == i = return True
matches (Num n) (Alit (Literal (Lint i) _) exp) | otherwise = return False
matches (Boolean False) (Acon qdcon _ _ _) = return $ zDecodeQualified qdcon == "ghc-prim:GHC.Types.False"
matches (Boolean True) (Acon qdcon _ _ _) = return $ zDecodeQualified qdcon == "ghc-prim:GHC.Types.True"
-- We keep a String value as a separate data type, but in Haskell it is a list of chars
matches (String _) (Acon (Just (M (P ("ghczmprim"),["GHC"],"Types")),"ZMZN") _ _ _) = return True
matches (String _) (Acon (Just (M (P ("ghczmprim"),["GHC"],"Types")),"ZC") tbinds vbinds exp) = return True
matches e@(Wrong s) _ = return False
--match against list cons
val `matches` alt = do
  ti <- gets tab_indentation
  let ?tab_indentation = ti
  io . putStrLn $ "??? Matching " ++ show val ++ " with " ++ show alt
  -- io . putStrLn $
  return False
-- | The right-hand-side expression of a case alternative.
patternExpression :: Alt -> Exp
patternExpression alt = case alt of
  Acon _ _ _ exp -> exp
  Alit _ exp     -> exp
  Adefault exp   -> exp
-- | Build a failed evaluation result carrying an error message.
evalFails :: String -> IM (Either Thunk Value)
evalFails msg = return (Right (Wrong msg))
-- | Does the same as evalExp but indents and deindents for debugging output purposes
evalExpI :: Exp -> Env -> String -> IM Value
evalExpI exp env desc = do
  ti <- gets tab_indentation
  let ?tab_indentation = ti
  watchReductionM $ desc ++ " => {"
  debugSubexpression exp
  increaseIndentation
  res <- eval exp env
  watchReductionM $ "=> " ++ show res
  decreaseIndentation
  watchReductionM $ "}" -- debugMStepEnd
  return res
-- | Function application
--
-- Applies a function value (or a type constructor awaiting arguments) to
-- the identifier of a heap-stored argument; anything else yields 'Wrong'.
apply :: Value -> Id -> Env -> IM Value
apply (Fun f d) id env = do
  watchReductionM $ "applying function " ++ d ++ " to " ++ id
  res <- f id env
  watchReductionM $ "apply " ++ d ++ " to " ++ id ++ " => " ++ show res
  return res
-- Applies a (possibly applied) type constructor that expects appliedValue of type ty.
-- The type constructor that we are applying has |vals| applied values
apply (TyConApp tycon@(MkDataCon datacon_name' signature' applied_types') addresses ) id env = do
  addr <- getPointer id env
  case addr of
    Pointer p -> case signature' of
      (sig_head:sig_tail) -> do
        watchReductionM $ "Reducing type constructor by " ++ show (sig_head)
        return $ TyConApp tycon { signature = sig_tail } (addresses ++ [p])
      [] -> do
        watchReductionM $ "Type constructor's signature has no types left that require reduction."
        return $ TyConApp tycon { signature = [] } (addresses ++ [p])
    e@(Wrong s) -> return e
--apply tca@(TyConApp tycon@(MkDataCon _ ts) addresses) id env = return . Wrong $ "Applying " ++ (show . length) ts ++ " with argument " ++ show id
apply (TyCon tycon@(MkDataCon _ (t:ts) applied_types') _) id env = do
  addr <- getPointer id env
  case addr of
    Pointer p -> return $ TyConApp (tycon { signature = ts }) ([p])
    e@(Wrong s) -> return e
apply w@(Wrong _) _ env = return w
apply f x env = do
  xstr <- evalId x env
  return . Wrong $ "Applying " ++ show f ++ " with argument " ++ (show xstr)
-- evalVdefgBenchmark :: Vdefg -> Env -> IM [HeapReference]
-- evalVdefgBenchmark vdefg env = do
-- debugM $ "doEvalVdefg; env.size == " ++ (show . length $ env)
-- beforeTime <- liftIO getCurrentTime
-- h <- gets heap
-- heap_refs <- evalVdefg vdefg env
-- mapM_ (benchmark beforeTime) heap_refs
-- debugM $ "doEvalVdefg.heap_refs: " ++ show heap_refs
-- debugM $ "doEvalVdefg.heap_refs.size: " ++ show (length heap_refs)
-- return heap_refs
-- where
-- benchmark :: UTCTime -> HeapReference -> IM ()
-- benchmark before heapRef@(id,heap_address) = do
-- sttgs <- gets settings
-- res <- lookupMem heap_address
-- afterTime <- liftIO getCurrentTime
-- let
-- time = afterTime `diffUTCTime` before
-- -- TODO: replace with new flag --benchmark
-- should_print = debug sttgs && show_tmp_variables sttgs
-- || debug sttgs && show_tmp_variables sttgs
-- (when should_print) $ do
-- debugM $ "Evaluation of " ++ id
-- debugM $ "\t.. done in " ++ show time ++ "\n\t.. and resulted in " ++ show res
-- | Evaluate a value definition, which can be either recursive or not recursive
-- | Returns either one heap reference when the definition is non recursive
-- or a list of these when it is recursive
evalVdefg :: Vdefg -> Env -> IM [HeapReference]
evalVdefg (Rec vdefs) env = mapM (flip evalVdefg env . Nonrec) vdefs >>= return . concat
evalVdefg (Nonrec (Vdef (qvar, ty, exp))) env = do
  debugMStep $ "Evaluating value definition: " ++ zDecodeQualified qvar
  whenFlag show_expressions $ indentExp exp >>= debugM . (++) "Expression: "
  increaseIndentation
  -- use the verbosity-gated debug channel instead of an unconditional
  -- putStrLn to stdout (leftover debug print, with a typo, removed)
  debugM "Calling benchmarkIM"
  res <- benchmarkIM (zDecodeQualified qvar) $ eval exp env -- result ****
  decreaseIndentation
  heap_ref <- memorize (mkVal res) (zDecodeQualified qvar) -- ***
  return [heap_ref]
-- | If the flag --benchmark was provided, the given computation is
-- then benchmarked
benchmarkIM :: Id -> IM a -> IM a
benchmarkIM id computation = do
  before <- liftIO getCurrentTime
  computed <- computation
  after <- liftIO getCurrentTime
  -- should we add the benchmark?
  whenFlag benchmark $ modify $ addBenchmark id (after `diffUTCTime` before)
  return computed
  where
    -- prepend the (id, elapsed-time) pair to the state's benchmark list
    addBenchmark :: Id -> NominalDiffTime -> DARTState -> DARTState
    addBenchmark id difftime st = st {
      benchmarks = (id,difftime):(benchmarks st)
    }
| kmels/dart-haskell | src/Language/Core/Interpreter/Evaluable.hs | bsd-3-clause | 24,168 | 78 | 32 | 6,353 | 5,925 | 2,973 | 2,952 | 358 | 4 |
module PCA
( fitPCA, transformPCA
) where
import PCA.QModels
import Numeric.LinearAlgebra.Data
(Matrix, Vector, toRows, fromRows)
import qualified Numeric.LinearAlgebra.HMatrix as H
-- | Fitted model parameters used to project new data.
data PCAModel = PCAModel
    { scale :: Matrix Double -- ^ W, the projection (loading) matrix
    , shift :: Vector Double -- ^ \mu, the mean offset
    }
-- | Fit the PCA model to a data matrix, packing the computed
-- projection matrix W and mean mu into a 'PCAModel'.
fitPCA :: Matrix Double -> PCAModel
fitPCA t = uncurry PCAModel (calculateQ t)
-- | Project data through the model: multiply by W, then shift every
-- resulting row by mu.
transformPCA :: PCAModel -> Matrix Double -> Matrix Double
transformPCA (PCAModel w mu) t =
    fromRows [row + mu | row <- toRows (t H.<> w)]
| DbIHbKA/vbpca | src/PCA.hs | bsd-3-clause | 716 | 0 | 11 | 227 | 206 | 116 | 90 | 22 | 1 |
{-
(c) The GRASP/AQUA Project, Glasgow University, 1993-1998
\section[Specialise]{Stamping out overloading, and (optionally) polymorphism}
-}
{-# LANGUAGE CPP #-}
module Specialise ( specProgram, specUnfolding ) where
#include "HsVersions.h"
import Id
import TcType hiding( substTy )
import Type hiding( substTy, extendTvSubstList )
import Coercion( Coercion )
import Module( Module, HasModule(..) )
import CoreMonad
import qualified CoreSubst
import CoreUnfold
import VarSet
import VarEnv
import CoreSyn
import Rules
import CoreUtils ( exprIsTrivial, applyTypeToArgs, mkCast )
import CoreFVs ( exprFreeVars, exprsFreeVars, idFreeVars )
import UniqSupply
import Name
import MkId ( voidArgId, voidPrimId )
import Maybes ( catMaybes, isJust )
import BasicTypes
import HscTypes
import Bag
import DynFlags
import Util
import Outputable
import FastString
import State
import UniqDFM
import Control.Monad
#if __GLASGOW_HASKELL__ > 710
import qualified Control.Monad.Fail as MonadFail
#endif
import Data.Map (Map)
import qualified Data.Map as Map
import qualified FiniteMap as Map
{-
************************************************************************
* *
\subsection[notes-Specialise]{Implementation notes [SLPJ, Aug 18 1993]}
* *
************************************************************************
These notes describe how we implement specialisation to eliminate
overloading.
The specialisation pass works on Core
syntax, complete with all the explicit dictionary application,
abstraction and construction as added by the type checker. The
existing type checker remains largely as it is.
One important thought: the {\em types} passed to an overloaded
function, and the {\em dictionaries} passed are mutually redundant.
If the same function is applied to the same type(s) then it is sure to
be applied to the same dictionary(s)---or rather to the same {\em
values}. (The arguments might look different but they will evaluate
to the same value.)
Second important thought: we know that we can make progress by
treating dictionary arguments as static and worth specialising on. So
we can do without binding-time analysis, and instead specialise on
dictionary arguments and no others.
The basic idea
~~~~~~~~~~~~~~
Suppose we have
let f = <f_rhs>
in <body>
and suppose f is overloaded.
STEP 1: CALL-INSTANCE COLLECTION
We traverse <body>, accumulating all applications of f to types and
dictionaries.
(Might there be partial applications, to just some of its types and
dictionaries? In principle yes, but in practice the type checker only
builds applications of f to all its types and dictionaries, so partial
applications could only arise as a result of transformation, and even
then I think it's unlikely. In any case, we simply don't accumulate such
partial applications.)
STEP 2: EQUIVALENCES
So now we have a collection of calls to f:
f t1 t2 d1 d2
f t3 t4 d3 d4
...
Notice that f may take several type arguments. To avoid ambiguity, we
say that f is called at type t1/t2 and t3/t4.
We take equivalence classes using equality of the *types* (ignoring
the dictionary args, which as mentioned previously are redundant).
STEP 3: SPECIALISATION
For each equivalence class, choose a representative (f t1 t2 d1 d2),
and create a local instance of f, defined thus:
f@t1/t2 = <f_rhs> t1 t2 d1 d2
f_rhs presumably has some big lambdas and dictionary lambdas, so lots
of simplification will now result. However we don't actually *do* that
simplification. Rather, we leave it for the simplifier to do. If we
*did* do it, though, we'd get more call instances from the specialised
RHS. We can work out what they are by instantiating the call-instance
set from f's RHS with the types t1, t2.
Add this new id to f's IdInfo, to record that f has a specialised version.
Before doing any of this, check that f's IdInfo doesn't already
tell us about an existing instance of f at the required type/s.
(This might happen if specialisation was applied more than once, or
it might arise from user SPECIALIZE pragmas.)
Recursion
~~~~~~~~~
Wait a minute! What if f is recursive? Then we can't just plug in
its right-hand side, can we?
But it's ok. The type checker *always* creates non-recursive definitions
for overloaded recursive functions. For example:
f x = f (x+x) -- Yes I know its silly
becomes
f a (d::Num a) = let p = +.sel a d
in
letrec fl (y::a) = fl (p y y)
in
fl
We still have recursion for non-overloaded functions which we
specialise, but the recursive call should get specialised to the
same recursive version.
Polymorphism 1
~~~~~~~~~~~~~~
All this is crystal clear when the function is applied to *constant
types*; that is, types which have no type variables inside. But what if
it is applied to non-constant types? Suppose we find a call of f at type
t1/t2. There are two possibilities:
(a) The free type variables of t1, t2 are in scope at the definition point
of f. In this case there's no problem, we proceed just as before. A common
example is as follows. Here's the Haskell:
g y = let f x = x+x
in f y + f y
After typechecking we have
g a (d::Num a) (y::a) = let f b (d'::Num b) (x::b) = +.sel b d' x x
in +.sel a d (f a d y) (f a d y)
Notice that the call to f is at type type "a"; a non-constant type.
Both calls to f are at the same type, so we can specialise to give:
g a (d::Num a) (y::a) = let f@a (x::a) = +.sel a d x x
in +.sel a d (f@a y) (f@a y)
(b) The other case is when the type variables in the instance types
are *not* in scope at the definition point of f. The example we are
working with above is a good case. There are two instances of (+.sel a d),
but "a" is not in scope at the definition of +.sel. Can we do anything?
Yes, we can "common them up", a sort of limited common sub-expression deal.
This would give:
g a (d::Num a) (y::a) = let +.sel@a = +.sel a d
f@a (x::a) = +.sel@a x x
in +.sel@a (f@a y) (f@a y)
This can save work, and can't be spotted by the type checker, because
the two instances of +.sel weren't originally at the same type.
Further notes on (b)
* There are quite a few variations here. For example, the defn of
+.sel could be floated outside the \y, to attempt to gain laziness.
It certainly mustn't be floated outside the \d because the d has to
be in scope too.
* We don't want to inline f_rhs in this case, because
that will duplicate code. Just commoning up the call is the point.
* Nothing gets added to +.sel's IdInfo.
* Don't bother unless the equivalence class has more than one item!
Not clear whether this is all worth it. It is of course OK to
simply discard call-instances when passing a big lambda.
Polymorphism 2 -- Overloading
~~~~~~~~~~~~~~
Consider a function whose most general type is
f :: forall a b. Ord a => [a] -> b -> b
There is really no point in making a version of g at Int/Int and another
at Int/Bool, because it's only instancing the type variable "a" which
buys us any efficiency. Since g is completely polymorphic in b there
ain't much point in making separate versions of g for the different
b types.
That suggests that we should identify which of g's type variables
are constrained (like "a") and which are unconstrained (like "b").
Then when taking equivalence classes in STEP 2, we ignore the type args
corresponding to unconstrained type variable. In STEP 3 we make
polymorphic versions. Thus:
f@t1/ = /\b -> <f_rhs> t1 b d1 d2
We do this.
Dictionary floating
~~~~~~~~~~~~~~~~~~~
Consider this
f a (d::Num a) = let g = ...
in
...(let d1::Ord a = Num.Ord.sel a d in g a d1)...
Here, g is only called at one type, but the dictionary isn't in scope at the
definition point for g. Usually the type checker would build a
definition for d1 which enclosed g, but the transformation system
might have moved d1's defn inward. Solution: float dictionary bindings
outwards along with call instances.
Consider
f x = let g p q = p==q
h r s = (r+s, g r s)
in
h x x
Before specialisation, leaving out type abstractions we have
f df x = let g :: Eq a => a -> a -> Bool
g dg p q = == dg p q
h :: Num a => a -> a -> (a, Bool)
h dh r s = let deq = eqFromNum dh
in (+ dh r s, g deq r s)
in
h df x x
After specialising h we get a specialised version of h, like this:
h' r s = let deq = eqFromNum df
in (+ df r s, g deq r s)
But we can't naively make an instance for g from this, because deq is not in scope
at the defn of g. Instead, we have to float out the (new) defn of deq
to widen its scope. Notice that this floating can't be done in advance -- it only
shows up when specialisation is done.
User SPECIALIZE pragmas
~~~~~~~~~~~~~~~~~~~~~~~
Specialisation pragmas can be digested by the type checker, and implemented
by adding extra definitions along with that of f, in the same way as before
f@t1/t2 = <f_rhs> t1 t2 d1 d2
Indeed the pragmas *have* to be dealt with by the type checker, because
only it knows how to build the dictionaries d1 and d2! For example
g :: Ord a => [a] -> [a]
{-# SPECIALIZE f :: [Tree Int] -> [Tree Int] #-}
Here, the specialised version of g is an application of g's rhs to the
Ord dictionary for (Tree Int), which only the type checker can conjure
up. There might not even *be* one, if (Tree Int) is not an instance of
Ord! (All the other specialisation has suitable dictionaries to hand
from actual calls.)
Problem. The type checker doesn't have to hand a convenient <f_rhs>, because
it is buried in a complex (as-yet-un-desugared) binding group.
Maybe we should say
f@t1/t2 = f* t1 t2 d1 d2
where f* is the Id f with an IdInfo which says "inline me regardless!".
Indeed all the specialisation could be done in this way.
That in turn means that the simplifier has to be prepared to inline absolutely
any in-scope let-bound thing.
Again, the pragma should permit polymorphism in unconstrained variables:
h :: Ord a => [a] -> b -> b
{-# SPECIALIZE h :: [Int] -> b -> b #-}
We *insist* that all overloaded type variables are specialised to ground types,
(and hence there can be no context inside a SPECIALIZE pragma).
We *permit* unconstrained type variables to be specialised to
- a ground type
- or left as a polymorphic type variable
but nothing in between. So
{-# SPECIALIZE h :: [Int] -> [c] -> [c] #-}
is *illegal*. (It can be handled, but it adds complication, and gains the
programmer nothing.)
SPECIALISING INSTANCE DECLARATIONS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
instance Foo a => Foo [a] where
...
{-# SPECIALIZE instance Foo [Int] #-}
The original instance decl creates a dictionary-function
definition:
dfun.Foo.List :: forall a. Foo a -> Foo [a]
The SPECIALIZE pragma just makes a specialised copy, just as for
ordinary function definitions:
dfun.Foo.List@Int :: Foo [Int]
dfun.Foo.List@Int = dfun.Foo.List Int dFooInt
The information about what instance of the dfun exist gets added to
the dfun's IdInfo in the same way as a user-defined function too.
Automatic instance decl specialisation?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Can instance decls be specialised automatically? It's tricky.
We could collect call-instance information for each dfun, but
then when we specialised their bodies we'd get new call-instances
for ordinary functions; and when we specialised their bodies, we might get
new call-instances of the dfuns, and so on. This all arises because of
the unrestricted mutual recursion between instance decls and value decls.
Still, there's no actual problem; it just means that we may not do all
the specialisation we could theoretically do.
Furthermore, instance decls are usually exported and used non-locally,
so we'll want to compile enough to get those specialisations done.
Lastly, there's no such thing as a local instance decl, so we can
survive solely by spitting out *usage* information, and then reading that
back in as a pragma when next compiling the file. So for now,
we only specialise instance decls in response to pragmas.
SPITTING OUT USAGE INFORMATION
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To spit out usage information we need to traverse the code collecting
call-instance information for all imported (non-prelude?) functions
and data types. Then we equivalence-class it and spit it out.
This is done at the top-level when all the call instances which escape
must be for imported functions and data types.
*** Not currently done ***
Partial specialisation by pragmas
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What about partial specialisation:
k :: (Ord a, Eq b) => [a] -> b -> b -> [a]
{-# SPECIALIZE k :: Eq b => [Int] -> b -> b -> [a] #-}
or even
{-# SPECIALIZE k :: Eq b => [Int] -> [b] -> [b] -> [a] #-}
Seems quite reasonable. Similar things could be done with instance decls:
instance (Foo a, Foo b) => Foo (a,b) where
...
{-# SPECIALIZE instance Foo a => Foo (a,Int) #-}
{-# SPECIALIZE instance Foo b => Foo (Int,b) #-}
Ho hum. Things are complex enough without this. I pass.
Requirements for the simplifier
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The simplifier has to be able to take advantage of the specialisation.
* When the simplifier finds an application of a polymorphic f, it looks in
f's IdInfo in case there is a suitable instance to call instead. This converts
f t1 t2 d1 d2 ===> f_t1_t2
Note that the dictionaries get eaten up too!
* Dictionary selection operations on constant dictionaries must be
short-circuited:
+.sel Int d ===> +Int
The obvious way to do this is in the same way as other specialised
calls: +.sel has inside it some IdInfo which tells that if it's applied
to the type Int then it should eat a dictionary and transform to +Int.
In short, dictionary selectors need IdInfo inside them for constant
methods.
* Exactly the same applies if a superclass dictionary is being
extracted:
Eq.sel Int d ===> dEqInt
* Something similar applies to dictionary construction too. Suppose
dfun.Eq.List is the function taking a dictionary for (Eq a) to
one for (Eq [a]). Then we want
dfun.Eq.List Int d ===> dEq.List_Int
Where does the Eq [Int] dictionary come from? It is built in
response to a SPECIALIZE pragma on the Eq [a] instance decl.
In short, dfun Ids need IdInfo with a specialisation for each
constant instance of their instance declaration.
All this uses a single mechanism: the SpecEnv inside an Id
What does the specialisation IdInfo look like?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The SpecEnv of an Id maps a list of types (the template) to an expression
[Type] |-> Expr
For example, if f has this RuleInfo:
[Int, a] -> \d:Ord Int. f' a
it means that we can replace the call
f Int t ===> (\d. f' t)
This chucks one dictionary away and proceeds with the
specialised version of f, namely f'.
What can't be done this way?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is no way, post-typechecker, to get a dictionary for (say)
Eq a from a dictionary for Eq [a]. So if we find
==.sel [t] d
we can't transform to
eqList (==.sel t d')
where
eqList :: (a->a->Bool) -> [a] -> [a] -> Bool
Of course, we currently have no way to automatically derive
eqList, nor to connect it to the Eq [a] instance decl, but you
can imagine that it might somehow be possible. Taking advantage
of this is permanently ruled out.
Still, this is no great hardship, because we intend to eliminate
overloading altogether anyway!
A note about non-tyvar dictionaries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some Ids have types like
forall a,b,c. Eq a -> Ord [a] -> tau
This seems curious at first, because we usually only have dictionary
args whose types are of the form (C a) where a is a type variable.
But this doesn't hold for the functions arising from instance decls,
which sometimes get arguments with types of form (C (T a)) for some
type constructor T.
Should we specialise wrt this compound-type dictionary? We used to say
"no", saying:
"This is a heuristic judgement, as indeed is the fact that we
specialise wrt only dictionaries. We choose *not* to specialise
wrt compound dictionaries because at the moment the only place
they show up is in instance decls, where they are simply plugged
into a returned dictionary. So nothing is gained by specialising
wrt them."
But it is simpler and more uniform to specialise wrt these dicts too;
and in future GHC is likely to support full fledged type signatures
like
f :: Eq [(a,b)] => ...
************************************************************************
* *
\subsubsection{The new specialiser}
* *
************************************************************************
Our basic game plan is this. For let(rec) bound function
f :: (C a, D c) => (a,b,c,d) -> Bool
* Find any specialised calls of f, (f ts ds), where
ts are the type arguments t1 .. t4, and
ds are the dictionary arguments d1 .. d2.
* Add a new definition for f1 (say):
f1 = /\ b d -> (..body of f..) t1 b t3 d d1 d2
Note that we abstract over the unconstrained type arguments.
* Add the mapping
[t1,b,t3,d] |-> \d1 d2 -> f1 b d
to the specialisations of f. This will be used by the
simplifier to replace calls
(f t1 t2 t3 t4) da db
by
(\d1 d1 -> f1 t2 t4) da db
All the stuff about how many dictionaries to discard, and what types
to apply the specialised function to, are handled by the fact that the
SpecEnv contains a template for the result of the specialisation.
We don't build *partial* specialisations for f. For example:
f :: Eq a => a -> a -> Bool
{-# SPECIALISE f :: (Eq b, Eq c) => (b,c) -> (b,c) -> Bool #-}
Here, little is gained by making a specialised copy of f.
There's a distinct danger that the specialised version would
first build a dictionary for (Eq b, Eq c), and then select the (==)
method from it! Even if it didn't, not a great deal is saved.
We do, however, generate polymorphic, but not overloaded, specialisations:
f :: Eq a => [a] -> b -> b -> b
... SPECIALISE f :: [Int] -> b -> b -> b ...
Hence, the invariant is this:
*** no specialised version is overloaded ***
************************************************************************
* *
\subsubsection{The exported function}
* *
************************************************************************
-}
-- | Specialise calls to type-class overloaded functions occurring in a program.
--
-- Entry point of the pass: specialises the bindings of the current module,
-- then cross-module-specialises any imported functions whose calls were
-- collected along the way, and glues the results back into the 'ModGuts'.
specProgram :: ModGuts -> CoreM ModGuts
specProgram guts@(ModGuts { mg_module = this_mod
                          , mg_rules  = local_rules
                          , mg_binds  = binds })
  = do { dflags <- getDynFlags

             -- Specialise the bindings of this module
       ; (binds', uds) <- runSpecM dflags this_mod (go binds)

             -- Specialise imported functions
       ; hpt_rules <- getRuleBase
       ; let rule_base = extendRuleBaseList hpt_rules local_rules
       ; (new_rules, spec_binds) <- specImports dflags this_mod top_env emptyVarSet
                                                [] rule_base (ud_calls uds)

             -- Don't forget to wrap the specialized bindings with bindings
             -- for the needed dictionaries.
             -- See Note [Wrap bindings returned by specImports]
       ; let spec_binds' = wrapDictBinds (ud_binds uds) spec_binds

       ; let final_binds
               | null spec_binds' = binds'
               | otherwise        = Rec (flattenBinds spec_binds') : binds'
                 -- The specialised bindings for imports are glommed into a
                 -- single Rec; see
                 -- Note [Glom the bindings if imported functions are specialised]

       ; return (guts { mg_binds = final_binds
                      , mg_rules = new_rules ++ local_rules }) }
  where
        -- We need to start with a Subst that knows all the things
        -- that are in scope, so that the substitution engine doesn't
        -- accidentally re-use a unique that's already in use
        -- Easiest thing is to do it all at once, as if all the top-level
        -- decls were mutually recursive
    top_env = SE { se_subst = CoreSubst.mkEmptySubst $ mkInScopeSet $ mkVarSet $
                              bindersOfBinds binds
                 , se_interesting = emptyVarSet }

    -- Process the bindings back-to-front: each binding is specialised
    -- against the usage details ('uds') gathered from the bindings that
    -- follow it in the module.
    go []           = return ([], emptyUDs)
    go (bind:binds) = do (binds', uds) <- go binds
                         (bind', uds') <- specBind top_env bind uds
                         return (bind' ++ binds', uds')
{-
Note [Wrap bindings returned by specImports]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'specImports' returns a set of specialized bindings. However, these are lacking
necessary floated dictionary bindings, which are returned by
UsageDetails(ud_binds). These dictionaries need to be brought into scope with
'wrapDictBinds' before the bindings returned by 'specImports' can be used. See,
for instance, the 'specImports' call in 'specProgram'.
Note [Disabling cross-module specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since GHC 7.10 we have performed specialisation of INLINEABLE bindings living
in modules outside of the current module. This can sometimes uncover user code
which explodes in size when aggressively optimized. The
-fno-cross-module-specialise option was introduced to allow users bitten by
such instances to revert to the pre-7.10 behavior.
See Trac #10491
-}
-- | Specialise a set of calls to imported bindings
--
-- Walks the 'CallDetails' (one 'CallInfoSet' per imported function),
-- delegating each function to 'specImport'; rules produced for one
-- function are in scope while specialising the remaining ones.
specImports :: DynFlags
            -> Module
            -> SpecEnv          -- Passed in so that all top-level Ids are in scope
            -> VarSet           -- Don't specialise these ones
                                -- See Note [Avoiding recursive specialisation]
            -> [Id]             -- Stack of imported functions being specialised
            -> RuleBase         -- Rules from this module and the home package
                                -- (but not external packages, which can change)
            -> CallDetails      -- Calls for imported things, and floating bindings
            -> CoreM ( [CoreRule]   -- New rules
                     , [CoreBind] ) -- Specialised bindings
                                    -- See Note [Wrap bindings returned by specImports]
specImports dflags this_mod top_env done callers rule_base cds
  -- See Note [Disabling cross-module specialisation]
  | not $ gopt Opt_CrossModuleSpecialise dflags =
    return ([], [])
  | otherwise =
    do { let import_calls = dVarEnvElts cds
       ; (rules, spec_binds) <- go rule_base import_calls
       ; return (rules, spec_binds) }
  where
    go :: RuleBase -> [CallInfoSet] -> CoreM ([CoreRule], [CoreBind])
    go _ [] = return ([], [])
    go rb (CIS fn calls_for_fn : other_calls)
      = do { (rules1, spec_binds1) <- specImport dflags this_mod top_env
                                                 done callers rb fn $
                                      Map.toList calls_for_fn
             -- Extend the rule base with fn's new rules before doing the
             -- rest of the calls, so they can fire while specialising
           ; (rules2, spec_binds2) <- go (extendRuleBaseList rb rules1) other_calls
           ; return (rules1 ++ rules2, spec_binds1 ++ spec_binds2) }
-- | Specialise one imported function against the calls collected for it.
-- Returns new RULEs plus the specialised bindings; may recurse (via
-- 'specImports') to specialise further imported functions called from the
-- specialised RHS.
specImport :: DynFlags
           -> Module
           -> SpecEnv          -- Passed in so that all top-level Ids are in scope
           -> VarSet           -- Don't specialise these
                               -- See Note [Avoiding recursive specialisation]
           -> [Id]             -- Stack of imported functions being specialised
           -> RuleBase         -- Rules from this module
           -> Id -> [CallInfo] -- Imported function and calls for it
           -> CoreM ( [CoreRule]   -- New rules
                    , [CoreBind] ) -- Specialised bindings
specImport dflags this_mod top_env done callers rb fn calls_for_fn
  | fn `elemVarSet` done
  = return ([], [])     -- No warning.  This actually happens all the time
                        -- when specialising a recursive function, because
                        -- the RHS of the specialised function contains a recursive
                        -- call to the original function
  | null calls_for_fn   -- We filtered out all the calls in deleteCallsMentioning
  = return ([], [])
  | wantSpecImport dflags unfolding
  , Just rhs <- maybeUnfoldingTemplate unfolding
  = do {     -- Get rules from the external package state
             -- We keep doing this in case we "page-fault in"
             -- more rules as we go along
       ; hsc_env <- getHscEnv
       ; eps <- liftIO $ hscEPS hsc_env
       ; vis_orphs <- getVisibleOrphanMods
       ; let full_rb = unionRuleBase rb (eps_rule_base eps)
             rules_for_fn = getRules (RuleEnv full_rb vis_orphs) fn

       ; (rules1, spec_pairs, uds) <- -- pprTrace "specImport1" (vcat [ppr fn, ppr calls_for_fn, ppr rhs]) $
                                      runSpecM dflags this_mod $
                                      specCalls (Just this_mod) top_env rules_for_fn calls_for_fn fn rhs
       ; let spec_binds1 = [NonRec b r | (b,r) <- spec_pairs]
             -- After the rules kick in we may get recursion, but
             -- we rely on a global GlomBinds to sort that out later
             -- See Note [Glom the bindings if imported functions are specialised]

             -- Now specialise any cascaded calls
             -- (fn is added to 'done' and the caller stack to stop the
             -- recursion; see Note [Avoiding recursive specialisation])
       ; (rules2, spec_binds2) <- -- pprTrace "specImport 2" (ppr fn $$ ppr rules1 $$ ppr spec_binds1) $
                                  specImports dflags this_mod top_env
                                              (extendVarSet done fn)
                                              (fn:callers)
                                              (extendRuleBaseList rb rules1)
                                              (ud_calls uds)

             -- Don't forget to wrap the specialized bindings with bindings
             -- for the needed dictionaries
             -- See Note [Wrap bindings returned by specImports]
       ; let final_binds = wrapDictBinds (ud_binds uds)
                                         (spec_binds2 ++ spec_binds1)

       ; return (rules2 ++ rules1, final_binds) }

  | warnMissingSpecs dflags callers
  -- Couldn't specialise (no suitable unfolding) and the user would
  -- want to know: see Note [Warning about missed specialisations]
  = do { warnMsg (vcat [ hang (text "Could not specialise imported function" <+> quotes (ppr fn))
                            2 (vcat [ text "when specialising" <+> quotes (ppr caller)
                                    | caller <- callers])
                       , ifPprDebug (text "calls:" <+> vcat (map (pprCallInfo fn) calls_for_fn))
                       , text "Probable fix: add INLINEABLE pragma on" <+> quotes (ppr fn) ])
       ; return ([], []) }

  | otherwise
  = return ([], [])

  where
    unfolding = realIdUnfolding fn   -- We want to see the unfolding even for loop breakers
-- | Should we warn that an imported function could not be specialised?
-- See Note [Warning about missed specialisations]
warnMissingSpecs :: DynFlags -> [Id] -> Bool
warnMissingSpecs dflags callers
  =  wopt Opt_WarnAllMissedSpecs dflags     -- -Wall-missed-specialisations: always
  || (  wopt Opt_WarnMissedSpecs dflags     -- -Wmissed-specialisations: only when
     && not (null callers)                  -- there is a caller chain, and every
     && all caller_inlined callers )        -- caller carries an INLINE/INLINABLE pragma
  where
    caller_inlined c = isAnyInlinePragma (idInlinePragma c)
-- | Do we want to specialise this imported function, given its unfolding?
-- See Note [Specialise imported INLINABLE things]
wantSpecImport :: DynFlags -> Unfolding -> Bool
wantSpecImport dflags unf
  = case unf of
      DFunUnfolding {}              -- Always specialise DFuns
        -> True
      CoreUnfolding { uf_src = src }
        -- With -fspecialise-aggressively, any unfolding template will do.
        -- Otherwise insist on a stable source (INLINE/INLINABLE):
        -- specialise even INLINE things; it hasn't inlined yet,
        -- so perhaps it never will.  Moreover it may have calls
        -- inside it that we want to specialise
        -> gopt Opt_SpecialiseAggressively dflags || isStableSource src
      _ -> False                    -- NoUnfolding / OtherCon: nothing to work with
{- Note [Warning about missed specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose
* In module Lib, you carefully mark a function 'foo' INLINEABLE
* Import Lib(foo) into another module M
* Call 'foo' at some specialised type in M
Then you jolly well expect it to be specialised in M. But what if
'foo' calls another function 'Lib.bar'. Then you'd like 'bar' to be
specialised too. But if 'bar' is not marked INLINEABLE it may well
not be specialised. The warning Opt_WarnMissedSpecs warns about this.
It's more noisy to warn about a missed specialisation opportunity
for /every/ overloaded imported function, but sometimes useful. That
is what Opt_WarnAllMissedSpecs does.
ToDo: warn about missed opportunities for local functions.
Note [Specialise imported INLINABLE things]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What imported functions do we specialise? The basic set is
* DFuns and things with INLINABLE pragmas.
but with -fspecialise-aggressively we add
* Anything with an unfolding template
Trac #8874 has a good example of why we want to auto-specialise DFuns.
We have the -fspecialise-aggressively flag (usually off), because we
risk lots of orphan modules from over-vigorous specialisation.
However it's not a big deal: anything non-recursive with an
unfolding-template will probably have been inlined already.
Note [Glom the bindings if imported functions are specialised]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose we have an imported, *recursive*, INLINABLE function
f :: Eq a => a -> a
f = /\a \d x. ...(f a d)...
In the module being compiled we have
g x = f (x::Int)
Now we'll make a specialised function
f_spec :: Int -> Int
f_spec = \x -> ...(f Int dInt)...
{-# RULE f Int _ = f_spec #-}
g = \x. f Int dInt x
Note that f_spec doesn't look recursive
After rewriting with the RULE, we get
f_spec = \x -> ...(f_spec)...
BUT since f_spec was non-recursive before it'll *stay* non-recursive.
The occurrence analyser never turns a NonRec into a Rec. So we must
make sure that f_spec is recursive. Easiest thing is to make all
the specialisations for imported bindings recursive.
Note [Avoiding recursive specialisation]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise 'f' we may find new overloaded calls to 'g', 'h' in
'f's RHS. So we want to specialise g,h. But we don't want to
specialise f any more! It's possible that f's RHS might have a
recursive yet-more-specialised call, so we'd diverge in that case.
And if the call is to the same type, one specialisation is enough.
Avoiding this recursive specialisation loop is the reason for the
'done' VarSet passed to specImports and specImport.
************************************************************************
* *
\subsubsection{@specExpr@: the main function}
* *
************************************************************************
-}
-- | Environment threaded through the specialiser.
data SpecEnv
  = SE { se_subst :: CoreSubst.Subst
             -- We carry a substitution down:
             -- a) we must clone any binding that might float outwards,
             --    to avoid name clashes
             -- b) we carry a type substitution to use when analysing
             --    the RHS of specialised bindings (no type-let!)

       , se_interesting :: VarSet
             -- Dict Ids that we know something about
             -- and hence may be worth specialising against
             -- See Note [Interesting dictionary arguments]
     }
-- | Apply the environment's substitution to an occurrence of an 'Id'.
specVar :: SpecEnv -> Id -> CoreExpr
specVar env var = CoreSubst.lookupIdSubst doc (se_subst env) var
  where
    doc = text "specVar"   -- context string for substitution-lookup panics
-- | Specialise an expression, returning the rewritten expression together
-- with the usage details (call instances and floated dictionary bindings)
-- it generates.
specExpr :: SpecEnv -> CoreExpr -> SpecM (CoreExpr, UsageDetails)

---------------- First the easy cases --------------------
specExpr env (Type ty)     = return (Type (substTy env ty), emptyUDs)
specExpr env (Coercion co) = return (Coercion (substCo env co), emptyUDs)
specExpr env (Var v)       = return (specVar env v, emptyUDs)
specExpr _   (Lit lit)     = return (Lit lit, emptyUDs)
specExpr env (Cast e co)
  = do { (e', uds) <- specExpr env e
       ; return ((mkCast e' (substCo env co)), uds) }
specExpr env (Tick tickish body)
  = do { (body', uds) <- specExpr env body
       ; return (Tick (specTickish env tickish) body', uds) }

---------------- Applications might generate a call instance --------------------
specExpr env expr@(App {})
  = go expr []
  where
    -- Walk down the application spine, specialising each argument and
    -- accumulating the (already-specialised) argument list
    go (App fun arg) args = do (arg', uds_arg) <- specExpr env arg
                               (fun', uds_app) <- go fun (arg':args)
                               return (App fun' arg', uds_arg `plusUDs` uds_app)
    -- At the head of the spine: record a call instance for the function
    go (Var f)       args = case specVar env f of
                              Var f' -> return (Var f', mkCallUDs env f' args)
                              e'     -> return (e', emptyUDs) -- I don't expect this!
    go other         _    = specExpr env other

---------------- Lambda/case require dumping of usage details --------------------
specExpr env e@(Lam _ _) = do
    (body', uds) <- specExpr env' body
    let (free_uds, dumped_dbs) = dumpUDs bndrs' uds
    return (mkLams bndrs' (wrapDictBindsE dumped_dbs body'), free_uds)
  where
    (bndrs, body)  = collectBinders e
    (env', bndrs') = substBndrs env bndrs
        -- More efficient to collect a group of binders together all at once
        -- and we don't want to split a lambda group with dumped bindings

specExpr env (Case scrut case_bndr ty alts)
  = do { (scrut', scrut_uds) <- specExpr env scrut
       ; (scrut'', case_bndr', alts', alts_uds)
             <- specCase env scrut' case_bndr alts
       ; return (Case scrut'' case_bndr' (substTy env ty) alts'
                , scrut_uds `plusUDs` alts_uds) }

---------------- Finally, let is the interesting case --------------------
specExpr env (Let bind body)
  = do {     -- Clone binders
         (rhs_env, body_env, bind') <- cloneBindSM env bind

             -- Deal with the body
       ; (body', body_uds) <- specExpr body_env body

             -- Deal with the bindings
       ; (binds', uds) <- specBind rhs_env bind' body_uds

             -- All done
       ; return (foldr Let body' binds', uds) }
-- | Substitute inside a 'Tickish'.  For a breakpoint we push each free
-- variable through 'specVar'; a variable whose substitution is not itself
-- a plain 'Var' is dropped from the list.
-- (That should never happen, but it's harmless to drop it anyway.)
specTickish :: SpecEnv -> Tickish Id -> Tickish Id
specTickish env (Breakpoint ix ids)
  = Breakpoint ix (concatMap subst_var ids)
  where
    subst_var v = case specVar env v of
                    Var v' -> [v']
                    _      -> []
specTickish _ tickish = tickish
-- | Specialise the alternatives of a case expression.  The single-alternative
-- dictionary case additionally floats out bindings for the scrutinee and its
-- superclass components, so that calls mentioning them can be specialised;
-- see Note [Floating dictionaries out of cases].
specCase :: SpecEnv
         -> CoreExpr            -- Scrutinee, already done
         -> Id -> [CoreAlt]
         -> SpecM ( CoreExpr    -- New scrutinee
                  , Id
                  , [CoreAlt]
                  , UsageDetails)
specCase env scrut' case_bndr [(con, args, rhs)]
  | isDictId case_bndr           -- See Note [Floating dictionaries out of cases]
  , interestingDict env scrut'
  , not (isDeadBinder case_bndr && null sc_args')
  = do { (case_bndr_flt : sc_args_flt) <- mapM clone_me (case_bndr' : sc_args')

         -- One floated selector binding per live superclass argument:
         --   sc_flt = case case_bndr_flt of { con ... sc ... -> sc }
       ; let sc_rhss = [ Case (Var case_bndr_flt) case_bndr' (idType sc_arg')
                              [(con, args', Var sc_arg')]
                       | sc_arg' <- sc_args' ]

             -- Extend the substitution for RHS to map the *original* binders
             -- to their floated versions.
             mb_sc_flts :: [Maybe DictId]
             mb_sc_flts = map (lookupVarEnv clone_env) args'
             clone_env  = zipVarEnv sc_args' sc_args_flt
             subst_prs  = (case_bndr, Var case_bndr_flt)
                        : [ (arg, Var sc_flt)
                          | (arg, Just sc_flt) <- args `zip` mb_sc_flts ]
             env_rhs' = env_rhs { se_subst = CoreSubst.extendIdSubstList (se_subst env_rhs) subst_prs
                                , se_interesting = se_interesting env_rhs `extendVarSetList`
                                                   (case_bndr_flt : sc_args_flt) }

       ; (rhs', rhs_uds) <- specExpr env_rhs' rhs
       ; let scrut_bind    = mkDB (NonRec case_bndr_flt scrut')
             case_bndr_set = unitVarSet case_bndr_flt
             -- Each selector binding depends on the floated scrutinee binding
             sc_binds      = [(NonRec sc_arg_flt sc_rhs, case_bndr_set)
                             | (sc_arg_flt, sc_rhs) <- sc_args_flt `zip` sc_rhss ]
             flt_binds     = scrut_bind : sc_binds
             (free_uds, dumped_dbs) = dumpUDs (case_bndr':args') rhs_uds
             all_uds = flt_binds `addDictBinds` free_uds
             alt'    = (con, args', wrapDictBindsE dumped_dbs rhs')
       ; return (Var case_bndr_flt, case_bndr', [alt'], all_uds) }
  where
    (env_rhs, (case_bndr':args')) = substBndrs env (case_bndr:args)
    sc_args' = filter is_flt_sc_arg args'

    -- Make a fresh Id with the same name, type and source location
    clone_me bndr = do { uniq <- getUniqueM
                       ; return (mkUserLocalOrCoVar occ uniq ty loc) }
      where
        name = idName bndr
        ty   = idType bndr
        occ  = nameOccName name
        loc  = getSrcSpan name

    arg_set = mkVarSet args'
    -- Float only live dictionary-typed arguments whose types do not
    -- mention the other case-bound variables
    is_flt_sc_arg var =  isId var
                      && not (isDeadBinder var)
                      && isDictTy var_ty
                      && not (tyCoVarsOfType var_ty `intersectsVarSet` arg_set)
      where
        var_ty = idType var


-- The general case: specialise each alternative independently
specCase env scrut case_bndr alts
  = do { (alts', uds_alts) <- mapAndCombineSM spec_alt alts
       ; return (scrut, case_bndr', alts', uds_alts) }
  where
    (env_alt, case_bndr') = substBndr env case_bndr
    spec_alt (con, args, rhs) = do
          (rhs', uds) <- specExpr env_rhs rhs
          let (free_uds, dumped_dbs) = dumpUDs (case_bndr' : args') uds
          return ((con, args', wrapDictBindsE dumped_dbs rhs'), free_uds)
      where
        (env_rhs, args') = substBndrs env_alt args
{-
Note [Floating dictionaries out of cases]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
g = \d. case d of { MkD sc ... -> ...(f sc)... }
Naively we can't float d2's binding out of the case expression,
because 'sc' is bound by the case, and that in turn means we can't
specialise f, which seems a pity.
So we invert the case, by floating out a binding
for 'sc_flt' thus:
sc_flt = case d of { MkD sc ... -> sc }
Now we can float the call instance for 'f'. Indeed this is just
what'll happen if 'sc' was originally bound with a let binding,
but case is more efficient, and necessary with equalities. So it's
good to work with both.
You might think that this won't make any difference, because the
call instance will only get nuked by the \d. BUT if 'g' itself is
specialised, then transitively we should be able to specialise f.
In general, given
case e of cb { MkD sc ... -> ...(f sc)... }
we transform to
let cb_flt = e
sc_flt = case cb_flt of { MkD sc ... -> sc }
in
case cb_flt of bg { MkD sc ... -> ....(f sc_flt)... }
The "_flt" things are the floated binds; we use the current substitution
to substitute sc -> sc_flt in the RHS
************************************************************************
* *
Dealing with a binding
* *
************************************************************************
-}
-- | Specialise a binding, producing replacement bindings plus the usage
-- details to pass upstream.  The NonRec case may return a Rec if floated
-- dictionary bindings have created a knot (see Note [From non-recursive
-- to recursive]); the Rec case does a bounded two-sweep specialisation
-- (see Note [Specialising a recursive group]).
specBind :: SpecEnv                     -- Use this for RHSs
         -> CoreBind
         -> UsageDetails                -- Info on how the scope of the binding
         -> SpecM ([CoreBind],          -- New bindings
                   UsageDetails)        -- And info to pass upstream

-- Returned UsageDetails:
--    No calls for binders of this bind
specBind rhs_env (NonRec fn rhs) body_uds
  = do { (rhs', rhs_uds) <- specExpr rhs_env rhs
       ; (fn', spec_defns, body_uds1) <- specDefn rhs_env body_uds fn rhs

       ; let pairs = spec_defns ++ [(fn', rhs')]
                        -- fn' mentions the spec_defns in its rules,
                        -- so put the latter first

             combined_uds = body_uds1 `plusUDs` rhs_uds
                -- This way round a call in rhs_uds of a function f
                -- at type T will override a call of f at T in body_uds1; and
                -- that is good because it'll tend to keep "earlier" calls
                -- See Note [Specialisation of dictionary functions]

             (free_uds, dump_dbs, float_all) = dumpBindUDs [fn] combined_uds
                -- See Note [From non-recursive to recursive]

             final_binds :: [DictBind]
             final_binds
               | isEmptyBag dump_dbs = [mkDB $ NonRec b r | (b,r) <- pairs]
               | otherwise           = [flattenDictBinds dump_dbs pairs]
                -- If dict-binds got dumped here, fuse everything into a
                -- single Rec; otherwise keep separate NonRecs

       ; if float_all then
             -- Rather than discard the calls mentioning the bound variables
             -- we float this binding along with the others
             return ([], free_uds `snocDictBinds` final_binds)
         else
             -- No call in final_uds mentions bound variables,
             -- so we can just leave the binding here
             return (map fst final_binds, free_uds) }


specBind rhs_env (Rec pairs) body_uds
       -- Note [Specialising a recursive group]
  = do { let (bndrs,rhss) = unzip pairs
       ; (rhss', rhs_uds) <- mapAndCombineSM (specExpr rhs_env) rhss
       ; let scope_uds = body_uds `plusUDs` rhs_uds
                -- Includes binds and calls arising from rhss

         -- First sweep
       ; (bndrs1, spec_defns1, uds1) <- specDefns rhs_env scope_uds pairs

         -- Second sweep, only if the first one produced specialisations:
         -- calls generated inside the new specialisations may themselves
         -- want specialising (see Note [Specialising a recursive group])
       ; (bndrs3, spec_defns3, uds3)
             <- if null spec_defns1  -- Common case: no specialisation
                then return (bndrs1, [], uds1)
                else do {            -- Specialisation occurred; do it again
                          (bndrs2, spec_defns2, uds2)
                              <- specDefns rhs_env uds1 (bndrs1 `zip` rhss)
                        ; return (bndrs2, spec_defns2 ++ spec_defns1, uds2) }

       ; let (final_uds, dumped_dbs, float_all) = dumpBindUDs bndrs uds3
             bind = flattenDictBinds dumped_dbs
                                     (spec_defns3 ++ zip bndrs3 rhss')

       ; if float_all then
             return ([], final_uds `snocDictBind` bind)
         else
             return ([fst bind], final_uds) }
---------------------------
-- | Specialise the bindings of a @Rec@ group, one by one, threading the
-- usage details right-to-left so that call-info arising from a later
-- binding's specialisation is visible when specialising an earlier one.
specDefns :: SpecEnv
          -> UsageDetails               -- Info on how it is used in its scope
          -> [(Id,CoreExpr)]            -- The things being bound and their un-processed RHS
          -> SpecM ([Id],               -- Original Ids with RULES added
                    [(Id,CoreExpr)],    -- Extra, specialised bindings
                    UsageDetails)       -- Stuff to fling upwards from the specialised versions

-- Specialise a list of bindings (the contents of a Rec), but flowing usages
-- upwards binding by binding.  Example: { f = ...g ...; g = ...f .... }
-- Then if the input CallDetails has a specialised call for 'g', whose specialisation
-- in turn generates a specialised call for 'f', we catch that in this one sweep.
-- But not vice versa (it's a fixpoint problem).

specDefns _env uds []
  = return ([], [], uds)
specDefns env uds ((bndr, rhs) : rest)
  = do { -- Process the rest of the group first, so their call-info
         -- flows into this binding's specialisation
         (rest_bndrs, rest_defns, uds_after_rest) <- specDefns env uds rest
       ; (bndr', here_defns, uds_final) <- specDefn env uds_after_rest bndr rhs
       ; return (bndr' : rest_bndrs, rest_defns ++ here_defns, uds_final) }
---------------------------
-- | Specialise one binding: gather the calls made to it in its scope,
-- build specialised copies and RULES via 'specCalls', and attach the
-- new rules to the original Id.
specDefn :: SpecEnv
         -> UsageDetails                -- Info on how it is used in its scope
         -> Id -> CoreExpr              -- The thing being bound and its un-processed RHS
         -> SpecM (Id,                  -- Original Id with added RULES
                   [(Id,CoreExpr)],     -- Extra, specialised bindings
                   UsageDetails)        -- Stuff to fling upwards from the specialised versions

specDefn env body_uds fn rhs
  = do { let (uds_without_fn, my_calls) = callsForMe fn body_uds
             existing_rules             = idCoreRules fn
       ; (new_rules, spec_defns, spec_uds)
             <- specCalls Nothing env existing_rules my_calls fn rhs
       ; return ( fn `addIdSpecialisations` new_rules
                , spec_defns
                , uds_without_fn `plusUDs` spec_uds ) }
          -- It's important that the `plusUDs` is this way
          -- round, because uds_without_fn may bind
          -- dictionaries that are used in my_calls passed
          -- to specDefn.  So the dictionary bindings in
          -- spec_uds may mention dictionaries bound in
          -- uds_without_fn
---------------------------
-- | The heart of the specialiser: given the calls made to @fn@, build one
-- specialised copy (and a RULE rewriting matching calls to it) per
-- distinct call pattern not already covered by an existing rule.
specCalls :: Maybe Module       -- Just this_mod => specialising imported fn
                                --     Nothing   => specialising local fn
          -> SpecEnv
          -> [CoreRule]         -- Existing RULES for the fn
          -> [CallInfo]
          -> Id -> CoreExpr
          -> SpecM ([CoreRule],       -- New RULES for the fn
                    [(Id,CoreExpr)],  -- Extra, specialised bindings
                    UsageDetails)     -- New usage details from the specialised RHSs

-- This function checks existing rules, and does not create
-- duplicate ones. So the caller does not need to do this filtering.
-- See 'already_covered'

specCalls mb_mod env rules_for_me calls_for_me fn rhs
        -- The first case is the interesting one
  |  rhs_tyvars `lengthIs` n_tyvars       -- Rhs of fn's defn has right number of big lambdas
  && rhs_ids    `lengthAtLeast` n_dicts   -- and enough dict args
  && notNull calls_for_me                 -- And there are some calls to specialise
  && not (isNeverActive (idInlineActivation fn))
        -- Don't specialise NOINLINE things
        -- See Note [Auto-specialisation and RULES]

--   && not (certainlyWillInline (idUnfolding fn))      -- And it's not small
--      See Note [Inline specialisation] for why we do not
--      switch off specialisation for inline functions

  = -- pprTrace "specDefn: some" (ppr fn $$ ppr calls_for_me $$ ppr rules_for_me) $
    do { stuff <- mapM spec_call calls_for_me
       ; let (spec_defns, spec_uds, spec_rules) = unzip3 (catMaybes stuff)
       ; return (spec_rules, spec_defns, plusUDList spec_uds) }

  | otherwise   -- No calls or RHS doesn't fit our preconceptions
  = WARN( not (exprIsTrivial rhs) && notNull calls_for_me,
          text "Missed specialisation opportunity for"
                <+> ppr fn $$ _trace_doc )
          -- Note [Specialisation shape]
    -- pprTrace "specDefn: none" (ppr fn <+> ppr calls_for_me) $
    return ([], [], emptyUDs)
  where
    _trace_doc = sep [ ppr rhs_tyvars, ppr n_tyvars
                     , ppr rhs_ids, ppr n_dicts
                     , ppr (idInlineActivation fn) ]

    fn_type  = idType fn
    fn_arity = idArity fn
    fn_unf   = realIdUnfolding fn   -- Ignore loop-breaker-ness here
    (tyvars, theta, _) = tcSplitSigmaTy fn_type
    n_tyvars = length tyvars
    n_dicts  = length theta
    inl_prag = idInlinePragma fn
    inl_act  = inlinePragmaActivation inl_prag
    is_local = isLocalId fn

        -- Figure out whether the function has an INLINE pragma
        -- See Note [Inline specialisations]

    (rhs_tyvars, rhs_ids, rhs_body) = collectTyAndValBinders rhs

    rhs_dict_ids = take n_dicts rhs_ids
    body         = mkLams (drop n_dicts rhs_ids) rhs_body
                -- Glue back on the non-dict lambdas

    -- True if an existing RULE already rewrites this argument pattern
    already_covered :: DynFlags -> [CoreExpr] -> Bool
    already_covered dflags args      -- Note [Specialisations already covered]
       = isJust (lookupRule dflags
                            (CoreSubst.substInScope (se_subst env), realIdUnfolding)
                            (const True)
                            fn args rules_for_me)

    -- Build the type arguments of the RULE LHS: the call's fixed types
    -- where the call constrained them, fresh tyvars where it did not
    mk_ty_args :: [Maybe Type] -> [TyVar] -> [CoreExpr]
    mk_ty_args [] poly_tvs
      = ASSERT( null poly_tvs ) []
    mk_ty_args (Nothing : call_ts) (poly_tv : poly_tvs)
      = Type (mkTyVarTy poly_tv) : mk_ty_args call_ts poly_tvs
    mk_ty_args (Just ty : call_ts) poly_tvs
      = Type ty : mk_ty_args call_ts poly_tvs
    mk_ty_args (Nothing : _) [] = panic "mk_ty_args"

    ----------------------------------------------------------
        -- Specialise to one particular call pattern
    spec_call :: CallInfo                         -- Call instance
              -> SpecM (Maybe ((Id,CoreExpr),     -- Specialised definition
                               UsageDetails,      -- Usage details from specialised body
                               CoreRule))         -- Info for the Id's SpecEnv
    spec_call _call_info@(CallKey call_ts, (call_ds, _))
      = ASSERT( call_ts `lengthIs` n_tyvars && call_ds `lengthIs` n_dicts )
        -- Suppose f's defn is   f = /\ a b c -> \ d1 d2 -> rhs
        -- Suppose the call is for f [Just t1, Nothing, Just t3] [dx1, dx2]
        -- Construct the new binding
        --      f1 = SUBST[a->t1,c->t3, d1->d1', d2->d2'] (/\ b -> rhs)
        -- PLUS the usage-details
        --      { d1' = dx1; d2' = dx2 }
        -- where d1', d2' are cloned versions of d1,d2, with the type substitution
        -- applied.  These auxiliary bindings just avoid duplication of dx1, dx2
        --
        -- Note that the substitution is applied to the whole thing.
        -- This is convenient, but just slightly fragile.  Notably:
        --      * There had better be no name clashes in a/b/c
        do { let
                -- poly_tyvars = [b] in the example above
                -- spec_tyvars = [a,c]
                -- ty_args     = [t1,b,t3]
                spec_tv_binds = [(tv,ty) | (tv, Just ty) <- rhs_tyvars `zip` call_ts]
                env1          = extendTvSubstList env spec_tv_binds
                (rhs_env, poly_tyvars) = substBndrs env1
                                            [tv | (tv, Nothing) <- rhs_tyvars `zip` call_ts]

             -- Clone rhs_dicts, including instantiating their types
           ; inst_dict_ids <- mapM (newDictBndr rhs_env) rhs_dict_ids
           ; let (rhs_env2, dx_binds, spec_dict_args)
                             = bindAuxiliaryDicts rhs_env rhs_dict_ids call_ds inst_dict_ids
                 ty_args     = mk_ty_args call_ts poly_tyvars
                 rule_args   = ty_args ++ map varToCoreExpr inst_dict_ids
                      -- varToCoreExpr does the right thing for CoVars
                 rule_bndrs  = poly_tyvars ++ inst_dict_ids

           ; dflags <- getDynFlags
           ; if already_covered dflags rule_args then
                return Nothing
             else -- pprTrace "spec_call" (vcat [ ppr _call_info, ppr fn, ppr rhs_dict_ids
                  --                            , text "rhs_env2" <+> ppr (se_subst rhs_env2)
                  --                            , ppr dx_binds ]) $
                  do
             {  -- Figure out the type of the specialised function
               let body_ty = applyTypeToArgs rhs fn_type rule_args
                   (lam_args, app_args)      -- Add a dummy argument if body_ty is unlifted
                     | isUnliftedType body_ty       -- C.f. WwLib.mkWorkerArgs
                     = (poly_tyvars ++ [voidArgId], poly_tyvars ++ [voidPrimId])
                     | otherwise = (poly_tyvars, poly_tyvars)
                   spec_id_ty = mkPiTypes lam_args body_ty

             ; spec_f <- newSpecIdSM fn spec_id_ty
             ; (spec_rhs, rhs_uds) <- specExpr rhs_env2 (mkLams lam_args body)
             ; this_mod <- getModule
             ; let
                    -- The rule to put in the function's specialisation is:
                    --  forall b, d1',d2'.  f t1 b t3 d1' d2' = f1 b
                   herald = case mb_mod of
                              Nothing        -- Specialising local fn
                                  -> text "SPEC"
                              Just this_mod  -- Specialising imported fn
                                  -> text "SPEC/" <> ppr this_mod

                   rule_name = mkFastString $ showSDocForUser dflags neverQualify $
                               herald <+> ppr fn <+> hsep (map ppr_call_key_ty call_ts)
                      -- This name ends up in interface files, so use showSDocForUser,
                      -- otherwise uniques end up there, making builds
                      -- less deterministic (See #4012 comment:61 ff)

                   spec_env_rule = mkRule
                                     this_mod
                                     True {- Auto generated -}
                                     is_local
                                     rule_name
                                     inl_act        -- Note [Auto-specialisation and RULES]
                                     (idName fn)
                                     rule_bndrs
                                     rule_args
                                     (mkVarApps (Var spec_f) app_args)

                   -- Add the { d1' = dx1; d2' = dx2 } usage stuff
                   final_uds = foldr consDictBind rhs_uds dx_binds

                   --------------------------------------
                   -- Add a suitable unfolding if the spec_inl_prag says so
                   -- See Note [Inline specialisations]
                   (spec_inl_prag, spec_unf)
                     | not is_local && isStrongLoopBreaker (idOccInfo fn)
                     = (neverInlinePragma, noUnfolding)
                           -- See Note [Specialising imported functions] in OccurAnal
                     | InlinePragma { inl_inline = Inlinable } <- inl_prag
                     = (inl_prag { inl_inline = EmptyInlineSpec }, noUnfolding)
                     | otherwise
                     = (inl_prag, specUnfolding dflags spec_unf_subst poly_tyvars
                                                spec_unf_args fn_unf)

                   spec_unf_args  = ty_args ++ spec_dict_args
                   spec_unf_subst = CoreSubst.setInScope (se_subst env)
                                      (CoreSubst.substInScope (se_subst rhs_env2))
                        -- Extend the in-scope set to satisfy the precondition of
                        -- specUnfolding, namely that in-scope(unf_subst) includes
                        -- the free vars of spec_unf_args.  The in-scope set of rhs_env2
                        -- is just the ticket; but the actual substitution we want is
                        -- the same old one from 'env'

                   --------------------------------------
                   -- Adding arity information just propagates it a bit faster
                   --      See Note [Arity decrease] in Simplify
                   -- Copy InlinePragma information from the parent Id.
                   -- So if f has INLINE[1] so does spec_f
                   spec_f_w_arity = spec_f `setIdArity`      max 0 (fn_arity - n_dicts)
                                           `setInlinePragma` spec_inl_prag
                                           `setIdUnfolding`  spec_unf

             ; return (Just ((spec_f_w_arity, spec_rhs), final_uds, spec_env_rule)) } }
{-
Note [Orphans and auto-generated rules]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When we specialise an INLINEABLE function, or when we have
-fspecialise-aggressively, we auto-generate RULES that are orphans.
We don't want to warn about these, or we'd generate a lot of warnings.
Thus, we only warn about user-specified orphan rules.
Indeed, we don't even treat the module as an orphan module if it has
auto-generated *rule* orphans. Orphan modules are read every time we
compile, so they are pretty obtrusive and slow down every compilation,
even non-optimised ones. (Reason: for type class instances it's a
type correctness issue.) But specialisation rules are strictly for
*optimisation* only so it's fine not to read the interface.
What this means is that a SPEC rules from auto-specialisation in
module M will be used in other modules only if M.hi has been read for
some other reason, which is actually pretty likely.
-}
bindAuxiliaryDicts
        :: SpecEnv
        -> [DictId] -> [CoreExpr]   -- Original dict bndrs, and the witnessing expressions
        -> [DictId]                 -- A cloned dict-id for each dict arg
        -> (SpecEnv,                -- Substitute for all orig_dicts
            [DictBind],             -- Auxiliary dict bindings
            [CoreExpr])             -- Witnessing expressions (all trivial)
-- Bind any dictionary arguments to fresh names, to preserve sharing
bindAuxiliaryDicts env@(SE { se_subst = subst, se_interesting = interesting })
                   orig_dict_ids call_ds inst_dict_ids
  = (env', dx_binds, spec_dict_args)
  where
    (dx_binds, spec_dict_args) = go call_ds inst_dict_ids

    -- Substitute each *original* dict binder by its (trivial) witnessing
    -- expression, and bring the freshly created dict ids into scope
    env' = env { se_subst = subst `CoreSubst.extendSubstList`
                                     (orig_dict_ids `zip` spec_dict_args)
                                  `CoreSubst.extendInScopeList` dx_ids
               , se_interesting = interesting `unionVarSet` interesting_dicts }

    dx_ids = [dx_id | (NonRec dx_id _, _) <- dx_binds]
    interesting_dicts = mkVarSet [ dx_id | (NonRec dx_id dx, _) <- dx_binds
                                 , interestingDict env dx ]
                  -- See Note [Make the new dictionaries interesting]

    -- For each call-site dict expression: pass it through directly if it
    -- is trivial; otherwise bind it to its cloned dict-id and pass the Var
    go :: [CoreExpr] -> [CoreBndr] -> ([DictBind], [CoreExpr])
    go [] _  = ([], [])
    go (dx:dxs) (dx_id:dx_ids)
      | exprIsTrivial dx = (dx_binds, dx : args)
      | otherwise        = (mkDB (NonRec dx_id dx) : dx_binds, Var dx_id : args)
      where
        (dx_binds, args) = go dxs dx_ids
        -- In the first case extend the substitution but not bindings;
        -- in the latter extend the bindings but not the substitution.
        -- For the former, note that we bind the *original* dict in the substitution,
        -- overriding any d->dx_id binding put there by substBndrs

    go _ _ = pprPanic "bindAuxiliaryDicts" (ppr orig_dict_ids $$ ppr call_ds $$ ppr inst_dict_ids)
{-
Note [Make the new dictionaries interesting]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Important! We're going to substitute dx_id1 for d
and we want it to look "interesting", else we won't gather *any*
consequential calls. E.g.
f d = ...g d....
If we specialise f for a call (f (dfun dNumInt)), we'll get
a consequent call (g d') with an auxiliary definition
d' = df dNumInt
We want that consequent call to look interesting
Note [From non-recursive to recursive]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Even in the non-recursive case, if any dict-binds depend on 'fn' we might
have built a recursive knot
f a d x = <blah>
MkUD { ud_binds = d7 = MkD ..f..
, ud_calls = ...(f T d7)... }
Then we generate
Rec { fs x = <blah>[T/a, d7/d]
f a d x = <blah>
RULE f T _ = fs
d7 = ...f... }
Here the recursion is only through the RULE.
Note [Specialisation of dictionary functions]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is a nasty example that bit us badly: see Trac #3591
class Eq a => C a
instance Eq [a] => C [a]
---------------
dfun :: Eq [a] -> C [a]
dfun a d = MkD a d (meth d)
d4 :: Eq [T] = <blah>
d2 :: C [T] = dfun T d4
d1 :: Eq [T] = $p1 d2
d3 :: C [T] = dfun T d1
None of these definitions is recursive. What happened was that we
generated a specialisation:
RULE forall d. dfun T d = dT :: C [T]
dT = (MkD a d (meth d)) [T/a, d1/d]
= MkD T d1 (meth d1)
But now we use the RULE on the RHS of d2, to get
d2 = dT = MkD d1 (meth d1)
d1 = $p1 d2
and now d1 is bottom! The problem is that when specialising 'dfun' we
should first dump "below" the binding all floated dictionary bindings
that mention 'dfun' itself. So d2 and d3 (and hence d1) must be
placed below 'dfun', and thus unavailable to it when specialising
'dfun'. That in turn means that the call (dfun T d1) must be
discarded. On the other hand, the call (dfun T d4) is fine, assuming
d4 doesn't mention dfun.
But look at this:
class C a where { foo,bar :: [a] -> [a] }
instance C Int where
foo x = r_bar x
bar xs = reverse xs
r_bar :: C a => [a] -> [a]
r_bar xs = bar (xs ++ xs)
That translates to:
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs }
The call (r_bar $fCInt) mentions $fCInt,
which mentions foo_help,
which mentions r_bar
But we DO want to specialise r_bar at Int:
Rec { $fCInt :: C Int = MkC foo_help reverse
foo_help (xs::[Int]) = r_bar Int $fCInt xs
r_bar a (c::C a) (xs::[a]) = bar a d (xs ++ xs)
RULE r_bar Int _ = r_bar_Int
r_bar_Int xs = bar Int $fCInt (xs ++ xs)
}
Note that, because of its RULE, r_bar joins the recursive
group. (In this case it'll unravel a short moment later.)
Conclusion: we catch the nasty case using filter_dfuns in
callsForMe. To be honest I'm not 100% certain that this is 100%
right, but it works. Sigh.
Note [Specialising a recursive group]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider
let rec { f x = ...g x'...
; g y = ...f y'.... }
in f 'a'
Here we specialise 'f' at Char; but that is very likely to lead to
a specialisation of 'g' at Char. We must do the latter, else the
whole point of specialisation is lost.
But we do not want to keep iterating to a fixpoint, because in the
presence of polymorphic recursion we might generate an infinite number
of specialisations.
So we use the following heuristic:
* Arrange the rec block in dependency order, so far as possible
(the occurrence analyser already does this)
* Specialise it much like a sequence of lets
* Then go through the block a second time, feeding call-info from
the RHSs back in the bottom, as it were
In effect, the ordering maximises the effectiveness of each sweep,
and we do just two sweeps. This should catch almost every case of
monomorphic recursion -- the exception could be a very knotted-up
recursion with multiple cycles tied up together.
This plan is implemented in the Rec case of specBindItself.
Note [Specialisations already covered]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We obviously don't want to generate two specialisations for the same
argument pattern. There are two wrinkles
1. We do the already-covered test in specDefn, not when we generate
the CallInfo in mkCallUDs. We used to test in the latter place, but
we now iterate the specialiser somewhat, and the Id at the call site
might therefore not have all the RULES that we can see in specDefn
2. What about two specialisations where the second is an *instance*
of the first? If the more specific one shows up first, we'll generate
specialisations for both. If the *less* specific one shows up first,
we *don't* currently generate a specialisation for the more specific
one. (See the call to lookupRule in already_covered.) Reasons:
(a) lookupRule doesn't say which matches are exact (bad reason)
(b) if the earlier specialisation is user-provided, it's
far from clear that we should auto-specialise further
Note [Auto-specialisation and RULES]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider:
g :: Num a => a -> a
g = ...
f :: (Int -> Int) -> Int
f w = ...
{-# RULE f g = 0 #-}
Suppose that auto-specialisation makes a specialised version of
g::Int->Int That version won't appear in the LHS of the RULE for f.
So if the specialisation rule fires too early, the rule for f may
never fire.
It might be possible to add new rules, to "complete" the rewrite system.
Thus when adding
RULE forall d. g Int d = g_spec
also add
RULE f g_spec = 0
But that's a bit complicated. For now we ask the programmer's help,
by *copying the INLINE activation pragma* to the auto-specialised
rule. So if g says {-# NOINLINE[2] g #-}, then the auto-spec rule
will also not be active until phase 2. And that's what programmers
should jolly well do anyway, even aside from specialisation, to ensure
that g doesn't inline too early.
This in turn means that the RULE would never fire for a NOINLINE
thing so not much point in generating a specialisation at all.
Note [Specialisation shape]
~~~~~~~~~~~~~~~~~~~~~~~~~~~
We only specialise a function if it has visible top-level lambdas
corresponding to its overloading. E.g. if
f :: forall a. Eq a => ....
then its body must look like
f = /\a. \d. ...
Reason: when specialising the body for a call (f ty dexp), we want to
substitute dexp for d, and pick up specialised calls in the body of f.
This doesn't always work. One example I came across was this:
newtype Gen a = MkGen{ unGen :: Int -> a }
choose :: Eq a => a -> Gen a
choose n = MkGen (\r -> n)
oneof = choose (1::Int)
It's a silly example, but we get
choose = /\a. g `cast` co
where choose doesn't have any dict arguments. Thus far I have not
tried to fix this (wait till there's a real example).
Mind you, then 'choose' will be inlined (since RHS is trivial) so
it doesn't matter. This comes up with single-method classes
class C a where { op :: a -> a }
instance C a => C [a] where ....
==>
$fCList :: C a => C [a]
$fCList = $copList |> (...coercion>...)
....(uses of $fCList at particular types)...
So we suppress the WARN if the rhs is trivial.
Note [Inline specialisations]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Here is what we do with the InlinePragma of the original function
* Activation/RuleMatchInfo: both transferred to the
specialised function
* InlineSpec:
(a) An INLINE pragma is transferred
(b) An INLINABLE pragma is *not* transferred
Why (a): transfer INLINE pragmas? The point of INLINE was precisely to
specialise the function at its call site, and arguably that's not so
important for the specialised copies. BUT *pragma-directed*
specialisation now takes place in the typechecker/desugarer, with
manually specified INLINEs. The specialisation here is automatic.
It'd be very odd if a function marked INLINE was specialised (because
of some local use), and then forever after (including importing
modules) the specialised version wasn't INLINEd. After all, the
programmer said INLINE!
You might wonder why we specialise INLINE functions at all. After
all they should be inlined, right? Two reasons:
* Even INLINE functions are sometimes not inlined, when they aren't
applied to interesting arguments. But perhaps the type arguments
alone are enough to specialise (even though the args are too boring
to trigger inlining), and it's certainly better to call the
specialised version.
* The RHS of an INLINE function might call another overloaded function,
and we'd like to generate a specialised version of that function too.
This actually happens a lot. Consider
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINABLE replicateM_ #-}
replicateM_ d x ma = ...
The strictness analyser may transform to
replicateM_ :: (Monad m) => Int -> m a -> m ()
{-# INLINE replicateM_ #-}
replicateM_ d x ma = case x of I# x' -> $wreplicateM_ d x' ma
$wreplicateM_ :: (Monad m) => Int# -> m a -> m ()
{-# INLINABLE $wreplicateM_ #-}
$wreplicateM_ = ...
Now an importing module has a specialised call to replicateM_, say
(replicateM_ dMonadIO). We certainly want to specialise $wreplicateM_!
This particular example had a huge effect on the call to replicateM_
in nofib/shootout/n-body.
Why (b): discard INLINEABLE pragmas? See Trac #4874 for persuasive examples.
Suppose we have
{-# INLINABLE f #-}
f :: Ord a => [a] -> Int
f xs = letrec f' = ...f'... in f'
Then, when f is specialised and optimised we might get
wgo :: [Int] -> Int#
wgo = ...wgo...
f_spec :: [Int] -> Int
f_spec xs = case wgo xs of { r -> I# r }
and we clearly want to inline f_spec at call sites. But if we still
have the big, un-optimised of f (albeit specialised) captured in an
INLINABLE pragma for f_spec, we won't get that optimisation.
So we simply drop INLINABLE pragmas when specialising. It's not really
a complete solution; ignoring specialisation for now, INLINABLE functions
don't get properly strictness analysed, for example. But it works well
for examples involving specialisation, which is the dominant use of
INLINABLE. See Trac #4874.
************************************************************************
* *
\subsubsection{UsageDetails and suchlike}
* *
************************************************************************
-}
-- | The information the specialiser floats upwards as it walks the
-- program: dictionary bindings to be placed somewhere above, plus the
-- calls whose patterns we may want to specialise for.
data UsageDetails
  = MkUD {
      ud_binds :: !(Bag DictBind),
               -- Floated dictionary bindings
               -- The order is important;
               -- in ds1 `union` ds2, bindings in ds2 can depend on those in ds1
               -- (Remember, Bags preserve order in GHC.)

      ud_calls :: !CallDetails
               -- INVARIANT: suppose bs = bindersOf ud_binds
               -- Then 'calls' may *mention* 'bs',
               -- but there should be no calls *for* bs
    }
-- Debug pretty-printer: shows both the floated binds and the call map.
instance Outputable UsageDetails where
  ppr (MkUD { ud_binds = binds, ud_calls = cs })
    = text "MkUD" <+> braces (sep (punctuate comma fields))
    where
      fields = [ text "binds" <+> equals <+> ppr binds
               , text "calls" <+> equals <+> ppr cs ]
-- | A 'DictBind' is a binding along with a cached set containing its free
-- variables (both type variables and dictionaries)
type DictBind = (CoreBind, VarSet)

-- | A Core expression standing for a dictionary argument
-- (includes equality witnesses and other constraint evidence).
type DictExpr = CoreExpr
-- | 'UsageDetails' with no floated dictionary binds and no recorded calls.
emptyUDs :: UsageDetails
emptyUDs = MkUD { ud_binds = emptyBag, ud_calls = emptyDVarEnv }
------------------------------------------------------------
type CallDetails  = DIdEnv CallInfoSet
  -- The order of specialized binds and rules depends on how we linearize
  -- CallDetails, so to get determinism we must use a deterministic set here.
  -- See Note [Deterministic UniqFM] in UniqDFM

newtype CallKey   = CallKey [Maybe Type]
  -- Nothing => unconstrained type argument

-- CallInfo uses a Map, thereby ensuring that
-- we record only one call instance for any key
--
-- The list of types and dictionaries is guaranteed to
-- match the type of f
data CallInfoSet = CIS Id (Map CallKey ([DictExpr], VarSet))
        -- Range is dict args and the vars of the whole
        -- call (including tyvars)
        -- [*not* include the main id itself, of course]

-- | One call pattern: its key plus dict args and free vars of the call.
type CallInfo = (CallKey, ([DictExpr], VarSet))
-- Debug pretty-printer; the binding avoids shadowing Prelude's 'map'.
instance Outputable CallInfoSet where
  ppr (CIS fn calls) = hang (text "CIS" <+> ppr fn) 2 (ppr calls)
-- | Pretty-print one call pattern for @fn@: the function followed by
-- its call-key type arguments (dict args elided).
pprCallInfo :: Id -> CallInfo -> SDoc
pprCallInfo fn (CallKey mb_tys, (_dict_args, _))
  = hang (ppr fn) 2 (fsep (map ppr_call_key_ty mb_tys))
-- | Render one 'CallKey' element: @_@ for an unconstrained type
-- argument, @\@ ty@ for a fixed one.
ppr_call_key_ty :: Maybe Type -> SDoc
ppr_call_key_ty = maybe (char '_') (\ty -> char '@' <+> pprParendType ty)
instance Outputable CallKey where
  ppr (CallKey ts) = ppr ts

-- Type isn't an instance of Ord, so that we can control which
-- instance we use.  That's tiresome here.  Oh well
-- Equality is defined via the Ord instance below, so the two always agree.
instance Eq CallKey where
  k1 == k2 = (k1 `compare` k2) == EQ
-- Lexicographic ordering over the type-argument lists; Nothing
-- (unconstrained) sorts before Just (constrained), and constrained
-- types are compared with cmpType.
instance Ord CallKey where
  compare (CallKey ts1) (CallKey ts2) = cmpList cmp_mb ts1 ts2
    where
      cmp_mb Nothing   Nothing   = EQ
      cmp_mb Nothing   (Just _)  = LT
      cmp_mb (Just _)  Nothing   = GT
      cmp_mb (Just t1) (Just t2) = cmpType t1 t2
-- | Merge two 'CallDetails', unioning the per-function 'CallInfoSet's
-- when both sides record calls for the same Id.
unionCalls :: CallDetails -> CallDetails -> CallDetails
unionCalls = plusDVarEnv_C unionCallInfoSet
-- | Union two call-info sets for the same function (left-biased on
-- duplicate keys, as with Map.union).
unionCallInfoSet :: CallInfoSet -> CallInfoSet -> CallInfoSet
unionCallInfoSet (CIS f m1) (CIS _ m2) = CIS f (Map.union m1 m2)
-- | Free variables of every call recorded in the 'CallDetails'.
-- It's OK to use nonDetFoldUDFM here because we forget the ordering
-- immediately by converting to a nondeterministic set.
callDetailsFVs :: CallDetails -> VarSet
callDetailsFVs details
  = nonDetFoldUDFM (\cis fvs -> callInfoFVs cis `unionVarSet` fvs)
                   emptyVarSet details
-- | Free variables of all calls in the set: union of the cached
-- per-call free-var sets in the Map's range.
callInfoFVs :: CallInfoSet -> VarSet
-- Fixed: Data.Map exports 'foldr', not 'foldRight'; the original
-- 'Map.foldRight' does not exist and cannot compile.
callInfoFVs (CIS _ call_info)
  = Map.foldr (\(_, fv) vs -> unionVarSet fv vs) emptyVarSet call_info
------------------------------------------------------------
-- | 'UsageDetails' recording exactly one call of @id@ at the given
-- type arguments and dictionary arguments; no floated binds.
singleCall :: Id -> [Maybe Type] -> [DictExpr] -> UsageDetails
singleCall id tys dicts
  = MkUD {ud_binds = emptyBag,
          ud_calls = unitDVarEnv id $ CIS id $
                     Map.singleton (CallKey tys) (dicts, call_fvs) }
  where
    call_fvs = exprsFreeVars dicts `unionVarSet` tys_fvs
    tys_fvs  = tyCoVarsOfTypes (catMaybes tys)
        -- The type args (tys) are guaranteed to be part of the dictionary
        -- types, because they are just the constrained types,
        -- and the dictionary is therefore sure to be bound
        -- inside the binding for any type variables free in the type;
        -- hence it's safe to neglect tyvars free in tys when making
        -- the free-var set for this call
        -- BUT I don't trust this reasoning; play safe and include tys_fvs
        --
        -- We don't include the 'id' itself.
-- | Record a call of @f@ at @args@ as a candidate for specialisation,
-- returning 'emptyUDs' when specialisation is impossible or unwanted.
-- 'mkCallUDs' is a trace-friendly wrapper around 'mkCallUDs''.
mkCallUDs, mkCallUDs' :: SpecEnv -> Id -> [CoreExpr] -> UsageDetails
mkCallUDs env f args
  = -- pprTrace "mkCallUDs" (vcat [ ppr f, ppr args, ppr res ])
    res
  where
    res = mkCallUDs' env f args

mkCallUDs' env f args
  | not (want_calls_for f)  -- Imported from elsewhere
  || null theta             -- Not overloaded
  = emptyUDs

  |  not (all type_determines_value theta)
  || not (spec_tys `lengthIs` n_tyvars)
  || not ( dicts   `lengthIs` n_dicts)
  || not (any (interestingDict env) dicts)    -- Note [Interesting dictionary arguments]
  -- See also Note [Specialisations already covered]
  = -- pprTrace "mkCallUDs: discarding" _trace_doc
    emptyUDs    -- Not overloaded, or no specialisation wanted

  | otherwise
  = -- pprTrace "mkCallUDs: keeping" _trace_doc
    singleCall f spec_tys dicts
  where
    _trace_doc = vcat [ppr f, ppr args, ppr n_tyvars, ppr n_dicts
                      , ppr (map (interestingDict env) dicts)]
    (tyvars, theta, _) = tcSplitSigmaTy (idType f)
    constrained_tyvars = tyCoVarsOfTypes theta
    n_tyvars           = length tyvars
    n_dicts            = length theta

    spec_tys = [mk_spec_ty tv ty | (tv, ty) <- tyvars `type_zip` args]
    dicts    = [dict_expr | (_, dict_expr) <- theta `zip` (drop n_tyvars args)]

    -- Pair up tyvars with the Type arguments of the call,
    -- skipping (ignoring) any Coercion arguments
    type_zip :: [TyVar] -> [CoreExpr] -> [(TyVar, Type)]
    type_zip tvs      (Coercion _ : args) = type_zip tvs args
    type_zip (tv:tvs) (Type ty : args)    = (tv, ty) : type_zip tvs args
    type_zip _        _                   = []

    -- Just ty  => this tyvar is constrained by theta, so worth pinning down
    -- Nothing  => unconstrained; stays polymorphic in the specialisation
    mk_spec_ty tyvar ty
      | tyvar `elemVarSet` constrained_tyvars = Just ty
      | otherwise                             = Nothing

    want_calls_for f = isLocalId f || isJust (maybeUnfoldingTemplate (realIdUnfolding f))
         -- For imported things, we gather call instances if
         -- there is an unfolding that we could in principle specialise
         -- We might still decide not to use it (consulting dflags)
         -- in specImports
         -- Use 'realIdUnfolding' to ignore the loop-breaker flag!

    type_determines_value pred    -- See Note [Type determines value]
      = case classifyPredType pred of
          ClassPred cls _ -> not (isIPClass cls)  -- Superclasses can't be IPs
          EqPred {}       -> True
          IrredPred {}    -> True   -- Things like (D []) where D is a
                                    -- Constraint-ranged family; Trac #7785
{-
Note [Type determines value]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Only specialise if all overloading is on non-IP *class* params,
because these are the ones whose *type* determines their *value*. In
particular, with implicit params, the type args *don't* say what the
value of the implicit param is! See Trac #7101
However, consider
type family D (v::*->*) :: Constraint
type instance D [] = ()
f :: D v => v Char -> Int
If we see a call (f "foo"), we'll pass a "dictionary"
() |> (g :: () ~ D [])
and it's good to specialise f at this dictionary.
So the question is: can an implicit parameter "hide inside" a
type-family constraint like (D a). Well, no. We don't allow
type instance D Maybe = ?x:Int
Hence the IrredPred case in type_determines_value.
See Trac #7785.
Note [Interesting dictionary arguments]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Consider this
\a.\d:Eq a. let f = ... in ...(f d)...
There really is not much point in specialising f wrt the dictionary d,
because the code for the specialised f is not improved at all, because
d is lambda-bound. We simply get junk specialisations.
What is "interesting"? Just that it has *some* structure. But what about
variables?
* A variable might be imported, in which case its unfolding
will tell us whether it has useful structure
* Local variables are cloned on the way down (to avoid clashes when
we float dictionaries), and cloning drops the unfolding
(cloneIdBndr). Moreover, we make up some new bindings, and it's a
nuisance to give them unfoldings. So we keep track of the
"interesting" dictionaries as a VarSet in SpecEnv.
We have to take care to put any new interesting dictionary
bindings in the set.
We accidentally lost accurate tracking of local variables for a long
time, because cloned variables don't have unfoldings. But it makes a
massive difference in a few cases, eg Trac #5113. For nofib as a
whole it's only a small win: 2.2% improvement in allocation for ansi,
1.2% for bspt, but mostly 0.0! Average 0.1% increase in binary size.
-}
-- | Is this dictionary argument worth specialising on?
-- A dictionary argument is interesting if it has *some* structure;
-- see Note [Interesting dictionary arguments].
-- NB: "dictionary" arguments include constraints of all sorts,
-- including equality constraints; hence the Coercion case
interestingDict :: SpecEnv -> CoreExpr -> Bool
interestingDict env (Var v) =  hasSomeUnfolding (idUnfolding v)   -- imported, with a known RHS
                            || isDataConWorkId v                  -- an actual dictionary constructor
                            || v `elemVarSet` se_interesting env  -- tracked explicitly (cloning drops unfoldings)
interestingDict _ (Type _)                  = False
interestingDict _ (Coercion _)              = False
-- Look through type/coercion applications, ticks and casts: they do not
-- change whether the underlying dictionary has structure
interestingDict env (App fn (Type _))       = interestingDict env fn
interestingDict env (App fn (Coercion _))   = interestingDict env fn
interestingDict env (Tick _ a)              = interestingDict env a
interestingDict env (Cast e _)              = interestingDict env e
interestingDict _ _                         = True   -- any other expression form counts as structured
-- | Union two sets of usage details: both the floated dictionary
-- bindings and the recorded call instances.
plusUDs :: UsageDetails -> UsageDetails -> UsageDetails
plusUDs (MkUD {ud_binds = binds1, ud_calls = calls1})
        (MkUD {ud_binds = binds2, ud_calls = calls2})
  = MkUD { ud_binds = unionBags binds1 binds2
         , ud_calls = unionCalls calls1 calls2 }

-- | Union a whole list of usage details, starting from the empty set.
plusUDList :: [UsageDetails] -> UsageDetails
plusUDList udss = foldr plusUDs emptyUDs udss
-----------------------------
-- All binders bound by a bag of dictionary bindings (debug helper).
_dictBindBndrs :: Bag DictBind -> [Id]
_dictBindBndrs = foldrBag (\(bind, _fvs) acc -> bindersOf bind ++ acc) []

-- | Construct a 'DictBind' from a 'CoreBind', caching its free variables.
mkDB :: CoreBind -> DictBind
mkDB core_bind = (core_bind, bind_fvs core_bind)

-- | Identify the free variables of a 'CoreBind'
bind_fvs :: CoreBind -> VarSet
bind_fvs (NonRec bndr rhs) = pair_fvs (bndr, rhs)
bind_fvs (Rec prs)         = foldl delVarSet all_rhs_fvs binders
  where
    -- The binders of the group scope over every right-hand side
    binders     = map fst prs
    all_rhs_fvs = unionVarSets (map pair_fvs prs)
-- | Free variables of one (binder, rhs) pair: the RHS's free variables
-- plus the binder's own free variables.
pair_fvs :: (Id, CoreExpr) -> VarSet
pair_fvs (bndr, rhs) = exprFreeVars rhs `unionVarSet` idFreeVars bndr
        -- Don't forget variables mentioned in the
        -- rules of the bndr.  C.f. OccAnal.addRuleUsage
        -- Also tyvars mentioned in its type; they may not appear in the RHS
        --    type T a = Int
        --    x :: T a = 3
-- | Flatten a set of 'DictBind's and some other binding pairs into a single
-- recursive binding, including some additional bindings.
flattenDictBinds :: Bag DictBind -> [(Id,CoreExpr)] -> DictBind
flattenDictBinds dbs pairs
  = (Rec bindings, fvs)
  where
    -- Accumulate (binder, rhs) pairs and the union of all cached free
    -- variables; the extra 'pairs' come last via a synthetic Rec binding
    (bindings, fvs) = foldrBag add
                               ([], emptyVarSet)
                               (dbs `snocBag` mkDB (Rec pairs))
    -- A non-recursive binding contributes one pair
    add (NonRec b r, fvs') (pairs, fvs) =
      ((b,r) : pairs, fvs `unionVarSet` fvs')
    -- A recursive group is spliced in wholesale
    add (Rec prs1, fvs') (pairs, fvs) =
      (prs1 ++ pairs, fvs `unionVarSet` fvs')
-- | Add the given dictionary bindings to the tail end of the bindings
-- already collected in the usage details.
snocDictBinds :: UsageDetails -> [DictBind] -> UsageDetails
-- Add ud_binds to the tail end of the bindings in uds
snocDictBinds uds dbs
  = uds { ud_binds = ud_binds uds `unionBags` listToBag dbs }
  -- listToBag is exactly foldr consBag emptyBag; using it here keeps
  -- this definition consistent with addDictBinds below
-- | Add one dictionary binding at the front of the collected bindings.
consDictBind :: DictBind -> UsageDetails -> UsageDetails
consDictBind db uds = uds { ud_binds = consBag db (ud_binds uds) }

-- | Add several dictionary bindings at the front of the collected bindings.
addDictBinds :: [DictBind] -> UsageDetails -> UsageDetails
addDictBinds dbs uds = uds { ud_binds = unionBags (listToBag dbs) (ud_binds uds) }

-- | Add one dictionary binding at the tail end of the collected bindings.
snocDictBind :: UsageDetails -> DictBind -> UsageDetails
snocDictBind uds db = uds { ud_binds = snocBag (ud_binds uds) db }
-- | Wrap the bindings (discarding their cached free-variable sets)
-- around a list of top-level bindings.
wrapDictBinds :: Bag DictBind -> [CoreBind] -> [CoreBind]
wrapDictBinds dbs binds = foldrBag (\(bind, _fvs) acc -> bind : acc) binds dbs

-- | Wrap the bindings around an expression as nested lets.
wrapDictBindsE :: Bag DictBind -> CoreExpr -> CoreExpr
wrapDictBindsE dbs body = foldrBag (\(bind, _fvs) e -> Let bind e) body dbs
----------------------
dumpUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpUDs bndrs uds@(MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  | null bndrs = (uds, emptyBag)  -- Common in case alternatives
  | otherwise  = -- pprTrace "dumpUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
                 (free_uds, dump_dbs)
  where
    free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
    bndr_set = mkVarSet bndrs
    -- Split bindings into those free of the binders and those that
    -- (transitively) mention them; the latter are dumped here
    (free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
    free_calls = deleteCallsMentioning dump_set $   -- Drop calls mentioning bndr_set on the floor
                 deleteCallsFor bndrs orig_calls    -- Discard calls for bndr_set; there should be
                                                    -- no calls for any of the dicts in dump_dbs
dumpBindUDs :: [CoreBndr] -> UsageDetails -> (UsageDetails, Bag DictBind, Bool)
-- Used at a lambda or case binder; just dump anything mentioning the binder
dumpBindUDs bndrs (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  = -- pprTrace "dumpBindUDs" (ppr bndrs $$ ppr free_uds $$ ppr dump_dbs) $
    (free_uds, dump_dbs, float_all)
  where
    free_uds = MkUD { ud_binds = free_dbs, ud_calls = free_calls }
    bndr_set = mkVarSet bndrs
    (free_dbs, dump_dbs, dump_set) = splitDictBinds orig_dbs bndr_set
    free_calls = deleteCallsFor bndrs orig_calls
    -- True <=> some kept call still mentions a dumped binding, so the
    -- caller must float everything rather than drop the dumped binds
    float_all = dump_set `intersectsVarSet` callDetailsFVs free_calls
-- | Extract the call instances for one function from the usage details,
-- returning the remaining usage details and that function's calls.
callsForMe :: Id -> UsageDetails -> (UsageDetails, [CallInfo])
callsForMe fn (MkUD { ud_binds = orig_dbs, ud_calls = orig_calls })
  = -- pprTrace ("callsForMe")
    --          (vcat [ppr fn,
    --                 text "Orig dbs ="     <+> ppr (_dictBindBndrs orig_dbs),
    --                 text "Orig calls ="   <+> ppr orig_calls,
    --                 text "Dep set ="      <+> ppr dep_set,
    --                 text "Calls for me =" <+> ppr calls_for_me]) $
    (uds_without_me, calls_for_me)
  where
    -- Everything except the calls for fn itself
    uds_without_me = MkUD { ud_binds = orig_dbs
                          , ud_calls = delDVarEnv orig_calls fn }
    calls_for_me = case lookupDVarEnv orig_calls fn of
                     Nothing -> []
                     Just (CIS _ calls) -> filter_dfuns (Map.toList calls)
    -- Binders (transitively) reachable from fn through the collected bindings
    dep_set = foldlBag go (unitVarSet fn) orig_dbs
    go dep_set (db,fvs) | fvs `intersectsVarSet` dep_set
                        = extendVarSetList dep_set (bindersOf db)
                        | otherwise = dep_set
    -- Note [Specialisation of dictionary functions]
    filter_dfuns | isDFunId fn = filter ok_call
                 | otherwise   = \cs -> cs
    -- For dfuns, drop calls whose free vars touch the dependency set
    ok_call (_, (_,fvs)) = not (fvs `intersectsVarSet` dep_set)
----------------------
-- | Partition dictionary bindings into those independent of the given
-- binder set and those that (transitively) mention it.
splitDictBinds :: Bag DictBind -> IdSet -> (Bag DictBind, Bag DictBind, IdSet)
-- Returns (free_dbs, dump_dbs, dump_set)
splitDictBinds dbs bndr_set
  = foldlBag split_db (emptyBag, emptyBag, bndr_set) dbs
    -- Important that it's foldl not foldr;
    -- we're accumulating the set of dumped ids in dump_set
  where
    split_db (free_dbs, dump_dbs, dump_idset) db@(bind, fvs)
      | dump_idset `intersectsVarSet` fvs     -- Dump it
      = (free_dbs, dump_dbs `snocBag` db,
         extendVarSetList dump_idset (bindersOf bind))
      | otherwise                             -- Don't dump it
      = (free_dbs `snocBag` db, dump_dbs, dump_idset)
----------------------
-- | Remove every call whose free variables mention any of the given set.
deleteCallsMentioning :: VarSet -> CallDetails -> CallDetails
-- Remove calls *mentioning* bs
deleteCallsMentioning bs env = mapDVarEnv filter_calls env
  where
    filter_calls :: CallInfoSet -> CallInfoSet
    filter_calls (CIS f cs) =
      CIS f (Map.filter (\(_, fvs) -> not (intersectsVarSet fvs bs)) cs)

-- | Remove every call *for* one of the given binders.
deleteCallsFor :: [Id] -> CallDetails -> CallDetails
deleteCallsFor ids env = delDVarEnvList env ids
{-
************************************************************************
* *
\subsubsection{Boring helper functions}
* *
************************************************************************
-}
-- | The specialiser monad: a state monad carrying a unique supply,
-- the module being compiled, and the dynamic flags.
newtype SpecM a = SpecM (State SpecState a)

data SpecState = SpecState {
                     spec_uniq_supply :: UniqSupply,  -- source of fresh uniques
                     spec_module :: Module,           -- module being specialised
                     spec_dflags :: DynFlags          -- session flags
                 }

-- All the monadic structure is inherited from the underlying State monad
instance Functor SpecM where
    fmap = liftM

instance Applicative SpecM where
    pure x = SpecM $ return x
    (<*>) = ap

instance Monad SpecM where
    SpecM x >>= f = SpecM $ do y <- x
                               case f y of
                                   SpecM z ->
                                       z
    fail str = SpecM $ fail str

#if __GLASGOW_HASKELL__ > 710
-- MonadFail was split off from Monad in GHC 8.0
instance MonadFail.MonadFail SpecM where
    fail str = SpecM $ fail str
#endif

instance MonadUnique SpecM where
    -- Split the stored supply, returning one half and keeping the other
    getUniqueSupplyM
        = SpecM $ do st <- get
                     let (us1, us2) = splitUniqSupply $ spec_uniq_supply st
                     put $ st { spec_uniq_supply = us2 }
                     return us1

    -- Take a single unique, keeping the depleted supply in the state
    getUniqueM
        = SpecM $ do st <- get
                     let (u,us') = takeUniqFromSupply $ spec_uniq_supply st
                     put $ st { spec_uniq_supply = us' }
                     return u
-- Project the respective fields straight out of the state
instance HasDynFlags SpecM where
    getDynFlags = SpecM (fmap spec_dflags get)

instance HasModule SpecM where
    getModule = SpecM (fmap spec_module get)
-- | Run a specialiser computation inside 'CoreM', seeding it with a
-- fresh unique supply, the current module and the session flags.
runSpecM :: DynFlags -> Module -> SpecM a -> CoreM a
runSpecM dflags this_mod (SpecM spec)
    = do us <- getUniqueSupplyM
         let st0 = SpecState { spec_uniq_supply = us
                             , spec_module      = this_mod
                             , spec_dflags      = dflags }
         return (evalState spec st0)
-- | Map a usage-collecting computation over a list, combining the
-- usage details of all elements with 'plusUDs'.
mapAndCombineSM :: (a -> SpecM (b, UsageDetails)) -> [a] -> SpecM ([b], UsageDetails)
mapAndCombineSM _ []           = return ([], emptyUDs)
mapAndCombineSM f (x : rest)   = do (y, uds_here)  <- f x
                                    (ys, uds_rest) <- mapAndCombineSM f rest
                                    return (y : ys, uds_here `plusUDs` uds_rest)
-- | Extend the environment's substitution with type-variable bindings.
extendTvSubstList :: SpecEnv -> [(TyVar,Type)] -> SpecEnv
extendTvSubstList env prs
  = env { se_subst = CoreSubst.extendTvSubstList (se_subst env) prs }

-- | Apply the environment's substitution to a type.
substTy :: SpecEnv -> Type -> Type
substTy env = CoreSubst.substTy (se_subst env)

-- | Apply the environment's substitution to a coercion.
substCo :: SpecEnv -> Coercion -> Coercion
substCo env = CoreSubst.substCo (se_subst env)

-- | Substitute one binder, threading the updated substitution back
-- into the environment.
substBndr :: SpecEnv -> CoreBndr -> (SpecEnv, CoreBndr)
substBndr env b = case CoreSubst.substBndr (se_subst env) b of
                    (subst', b') -> (env { se_subst = subst' }, b')

-- | Substitute several binders, threading the updated substitution.
substBndrs :: SpecEnv -> [CoreBndr] -> (SpecEnv, [CoreBndr])
substBndrs env bs = case CoreSubst.substBndrs (se_subst env) bs of
                      (subst', bs') -> (env { se_subst = subst' }, bs')
cloneBindSM :: SpecEnv -> CoreBind -> SpecM (SpecEnv, SpecEnv, CoreBind)
-- Clone the binders of the bind; return new bind with the cloned binders
-- Return the substitution to use for RHSs, and the one to use for the body
cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (NonRec bndr rhs)
  = do { us <- getUniqueSupplyM
       ; let (subst', bndr') = CoreSubst.cloneIdBndr subst us bndr
             -- Cloning drops unfoldings, so record interesting dictionary
             -- binders explicitly; see Note [Interesting dictionary arguments]
             interesting' | interestingDict env rhs
                          = interesting `extendVarSet` bndr'
                          | otherwise = interesting
       -- Non-recursive: the RHS sees the *unextended* environment
       ; return (env, env { se_subst = subst', se_interesting = interesting' }
                , NonRec bndr' rhs) }

cloneBindSM env@(SE { se_subst = subst, se_interesting = interesting }) (Rec pairs)
  = do { us <- getUniqueSupplyM
       ; let (subst', bndrs') = CoreSubst.cloneRecIdBndrs subst us (map fst pairs)
             env' = env { se_subst = subst'
                        , se_interesting = interesting `extendVarSetList`
                          [ v | (v,r) <- pairs, interestingDict env r ] }
       -- Recursive group: RHSs and body both see the extended environment
       ; return (env', env', Rec (bndrs' `zip` map snd pairs)) }
newDictBndr :: SpecEnv -> CoreBndr -> SpecM CoreBndr
-- Make up completely fresh binders for the dictionaries
-- Their bindings are going to float outwards
newDictBndr env b
  = do { uniq <- getUniqueM
       ; let old_name = idName b
             ty'      = substTy env (idType b)
       ; return (mkUserLocalOrCoVar (nameOccName old_name) uniq ty'
                                    (getSrcSpan old_name)) }

newSpecIdSM :: Id -> Type -> SpecM Id
-- Give the new Id a similar occurrence name to the old one
newSpecIdSM old_id new_ty
  = do { uniq <- getUniqueM
       ; let old_name = idName old_id
             spec_occ = mkSpecOcc (nameOccName old_name)
       ; return (mkUserLocalOrCoVar spec_occ uniq new_ty (getSrcSpan old_name)) }
{-
Old (but interesting) stuff about unboxed bindings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
What should we do when a value is specialised to a *strict* unboxed value?
map_*_* f (x:xs) = let h = f x
t = map f xs
in h:t
Could convert let to case:
map_*_Int# f (x:xs) = case f x of h# ->
let t = map f xs
in h#:t
This may be undesirable since it forces evaluation here, but the value
may not be used in all branches of the body. In the general case this
transformation is impossible since the mutual recursion in a letrec
cannot be expressed as a case.
There is also a problem with top-level unboxed values, since our
implementation cannot handle unboxed values at the top level.
Solution: Lift the binding of the unboxed value and extract it when it
is used:
map_*_Int# f (x:xs) = let h = case (f x) of h# -> _Lift h#
t = map f xs
in case h of
_Lift h# -> h#:t
Now give it to the simplifier and the _Lifting will be optimised away.
The benefit is that we have given the specialised "unboxed" values a
very simple lifted semantics and then leave it up to the simplifier to
optimise it --- knowing that the overheads will be removed in nearly
all cases.
In particular, the value will only be evaluated in the branches of the
program which use it, rather than being forced at the point where the
value is bound. For example:
filtermap_*_* p f (x:xs)
= let h = f x
t = ...
in case p x of
True -> h:t
False -> t
==>
filtermap_*_Int# p f (x:xs)
= let h = case (f x) of h# -> _Lift h#
t = ...
in case p x of
True -> case h of _Lift h#
-> h#:t
False -> t
The binding for h can still be inlined in the one branch and the
_Lifting eliminated.
Question: When won't the _Lifting be eliminated?
Answer: When they are at the top-level (where it is necessary) or when
inlining would duplicate work (or possibly code depending on
options). However, the _Lifting will still be eliminated if the
strictness analyser deems the lifted binding strict.
-}
| vikraman/ghc | compiler/specialise/Specialise.hs | bsd-3-clause | 94,194 | 1 | 22 | 27,570 | 11,139 | 6,040 | 5,099 | -1 | -1 |
{-# LANGUAGE GADTs #-}
module Architecture.ARM.Instructions.UAL.Semantics where
import Architecture.ARM.State
import Architecture.ARM.Instructions.UAL
import Data.Record.Label
-- This should be put somewhere else, maybe even in a separate package
-- | A small symbolic expression language over a base type @a@, used to
-- describe instruction semantics.  Operator meanings below are inferred
-- from the operator names — TODO confirm against the evaluator.
data Expr a where
  Const :: a -> Expr a                  -- literal value
  (:+) :: Expr a -> Expr a -> Expr a    -- addition (presumably)
  (:-) :: Expr a -> Expr a -> Expr a    -- subtraction (presumably)
  (:*) :: Expr a -> Expr a -> Expr a    -- multiplication (presumably)
  (:/) :: Expr a -> Expr a -> Expr a    -- division (presumably)
  (:<<) :: Expr a -> Expr a -> Expr a   -- shift left (presumably)
  (:>>) :: Expr a -> Expr a -> Expr a   -- shift right (presumably)
  (:<<<) :: Expr a -> Expr a -> Expr a  -- rotate left (presumably)
  (:>>>) :: Expr a -> Expr a -> Expr a  -- rotate right (presumably)
  (:&) :: Expr a -> Expr a -> Expr a    -- bitwise and (presumably)
  (:|) :: Expr a -> Expr a -> Expr a    -- bitwise or (presumably)
{-
-- Num should be an expression, and we should steal all the operators
-- Need state monad
evaluateConditional :: Num r => Conditional -> ARMState r m -> ARMState r m
evaluateConditional (B off) = modL pc (+ fromIntegral off)
evaluateConditional (BX r) = do x <- getL (reg r)
setL pc x
-- set arm/thumb bit
evaluateConditional x = error $ "haven't implemented semantics for conditional instruction " ++ show x
evaluate :: UALInstruction -> ARMState r m -> ARMState r m
evaluate x = error $ "haven't implemented semantics for instruction " ++ show x
-} | copumpkin/charm | src/Architecture/ARM/Instructions/UAL/Semantics.hs | bsd-3-clause | 1,306 | 0 | 8 | 345 | 288 | 153 | 135 | 17 | 0 |
{-# LANGUAGE DeriveGeneric #-}
module Crawl.Stats.Armour where
import qualified Data.Csv as CSV
import GHC.Generics (Generic)
import qualified Data.Default as Default
import qualified Crawl.Stats.Named as Named
-- | One armour item, decoded from a CSV row (column names must match
-- the field names, via the Generic FromNamedRecord instance below).
data Armour = Armour {
  -- | Display name of the armour.
  name :: String,
  -- | Base AC value (presumably armour class — confirm against the CSV).
  baseAc :: Integer,
  -- | Encumbrance rating.
  encumbrance :: Integer,
  -- | GDR value (presumably guaranteed damage reduction — confirm).
  gdr :: Integer
} deriving (Generic, Eq)

-- Field names double as the CSV column headers
instance CSV.FromNamedRecord Armour

instance Named.Named Armour where
  name = name  -- the record selector itself serves as the Named accessor
-- | The "no armour" placeholder: zero in every stat.
instance Default.Default Armour where
  def = Armour { name = "none", baseAc = 0, encumbrance = 0, gdr = 0 }
| jfrikker/crawlstats | src/Crawl/Stats/Armour.hs | bsd-3-clause | 498 | 0 | 8 | 86 | 135 | 82 | 53 | 17 | 0 |
{-# LANGUAGE QuasiQuotes, OverloadedStrings, FlexibleContexts, FlexibleInstances #-}
module Data.Excelx where
import Data.Maybe
import Data.Monoid
import Data.Time.LocalTime
import Data.Time.Calendar
import Control.Monad
import Control.Monad.Reader
import qualified Data.Text as T
import qualified Data.ByteString.Lazy as BS
import Codec.Archive.Zip
import Text.XML as XML
import Text.XML.Cursor
-- | A cell formula, kept as raw text.
type Formula = T.Text

-- | A cell position: numeric row/column (R1C1) or a textual A1 reference.
data Position = R1C1 Int Int | A1 T.Text deriving Show

-- | A spreadsheet cell tagged with its position.
data Cell = NumericCell Position Double
          | TextCell Position T.Text
          | FormulaCell Position Formula Cell   -- formula plus its cached value cell
          | BlankCell Position                  -- present in the sheet but empty
          | NoCell Position                     -- absent from the sheet
          deriving Show

-- | Types that can be decoded from a spreadsheet 'Cell'.
class FromCell a where
  fromCell :: Cell -> a

instance FromCell Double where
  -- Non-numeric cells decode as zero
  fromCell c = case valueCell c of
                 NumericCell _ d -> d
                 _ -> 0.0

instance FromCell Bool where
  -- Positive numbers and non-empty text count as True
  fromCell c = case valueCell c of
                 NumericCell _ d -> d > 0.0
                 TextCell _ t -> T.length t > 0
                 _ -> False

instance FromCell Integer where
  fromCell c = floor (fromCell c :: Double)

instance FromCell T.Text where
  -- Numbers are rendered via 'show'; anything else decodes as ""
  fromCell c = case valueCell c of
                 TextCell _ t -> t
                 NumericCell _ t -> T.pack $ show t
                 _ -> ""

instance FromCell String where
  fromCell c = T.unpack $ fromCell c

instance FromCell Day where
  -- Serial day count added to the 1899-12-30 epoch
  fromCell c = addDays (fromCell c) (fromGregorian 1899 12 30)

instance FromCell TimeOfDay where
  -- The fractional part of the serial number is the fraction of a day
  fromCell c = dayFractionToTimeOfDay part
    where
      d = fromCell c :: Double
      part = toRational $ d - (fromInteger $ floor d)

instance FromCell LocalTime where
  fromCell c = LocalTime (fromCell c) (fromCell c)

instance FromCell a => FromCell (Maybe a) where
  -- Blank or missing cells become Nothing
  fromCell c = case valueCell c of
                 BlankCell _ -> Nothing
                 NoCell _ -> Nothing
                 _ -> Just (fromCell c)

instance FromCell a => FromCell (Either Position a) where
  -- Missing cells report their position in Left
  fromCell c = case valueCell c of
                 NoCell pos -> Left pos
                 _ -> Right $ fromCell c
-- | Strip a formula wrapper, yielding the cached value cell; any other
-- cell is returned unchanged.
valueCell :: Cell -> Cell
valueCell c = case c of
  FormulaCell _ _ v -> v
  other             -> other

-- | Drop the 'NoCell' placeholders from a list of cells.
catCells :: [Cell] -> [Cell]
catCells = filter present
  where
    present (NoCell _) = False
    present _          = True
-- | Render a position in A1 form, e.g. @R1C1 3 2@ becomes @\"B3\"@.
-- Row and column indices are clamped to a minimum of 1.
a1form :: Position -> T.Text
a1form (A1 t) = t
a1form (R1C1 i j) = T.toUpper . T.pack $ alpha (max j 1) ++ show (max i 1)
  where
    -- Corecursive list of all column labels in spreadsheet order:
    -- "a".."z","aa".."az","ba".., built by prefixing and then reversing
    alphabet = map reverse [c : s | s <- "" : alphabet, c <- ['a' .. 'z']]
    alpha idx = alphabet !! (idx - 1)
-- | The shared-string table, indexed positionally.
type SharedStringIndex = [T.Text]
-- | Map from sheet name to its entry path inside the archive.
type SheetIndex = [(T.Text, FilePath)]

-- | A parsed .xlsx workbook: the zip archive plus its decoded indices.
data Excelx = Excelx {archive :: Archive, sharedStrings :: SharedStringIndex, sheetIndex :: SheetIndex}

-- | Find a zip entry and parse it as XML, yielding a cursor at its root.
-- NOTE(review): 'fail' in the Maybe monad collapses to Nothing, so the
-- error message built here is silently discarded.
findXMLEntry :: FilePath -> Archive -> Maybe Cursor
findXMLEntry path ar = do
  entry <- findEntryByPath path ar
  case parseLBS def (fromEntry entry) of
    Left _ -> fail $ "Invalid entry: " ++ path
    Right xml -> return $ fromDocument xml
-- | Read the workbook's sheet listing, joining each sheet name to its
-- archive path via the relationships table.
extractSheetIndex :: Archive -> Maybe [(T.Text, String)]
extractSheetIndex ar = do
  sheetXml <- findXMLEntry "xl/workbook.xml" ar
  relXml <- findXMLEntry "xl/_rels/workbook.xml.rels" ar
  return $ do
    let relationships = relXml $.// laxElement "Relationship"
        sheetListings = sheetXml $.// laxElement "sheet"
    sheetListingEntry <- sheetListings
    relationship <- relationships
    -- Join sheets to relationships on the shared relationship id
    guard $ (laxAttribute "Id") relationship == (laxAttribute "id") sheetListingEntry
    sheetName <- (laxAttribute "Name") sheetListingEntry
    target <- (laxAttribute "Target") relationship
    -- Relationship targets are relative to the xl/ directory
    return (sheetName, T.unpack $ mappend "xl/" target)
-- | Decode the shared-string table, concatenating the text runs of
-- each @si@ entry into one string.
extractSharedStrings :: Archive -> Maybe [T.Text]
extractSharedStrings ar = do
  stringsXml <- findXMLEntry "xl/sharedStrings.xml" ar
  let entries = stringsXml $.// laxElement "si"
  return $ map (\e -> mconcat (e $// laxElement "t" &/ content)) entries
-- | Parse the cell at the given position of a sheet; yields 'NoCell'
-- when the sheet has no element at that address.
parseCell :: Excelx -> Cursor -> Position -> Cell
parseCell xlsx xmlSheet cellpos =
  let at = a1form cellpos
      cursor = xmlSheet $// laxElement "c" >=> attributeIs "r" at in
  case listToMaybe cursor of
    Nothing -> NoCell cellpos
    Just c ->
      -- Try each interpretation in turn; a formula cell also carries
      -- its cached value, decoded the same way
      let formulaCell = do
            formula <- listToMaybe $ cellContents "f" c
            let valcell = fromMaybe (BlankCell cellpos) $ msum [textCell, numericCell, blankCell]
            return $ FormulaCell cellpos formula valcell
          -- t="s" marks a shared-string cell: the value is an index
          -- into the workbook's shared-string table
          textCell = do
            textcell <- listToMaybe $ c $.// attributeIs "t" "s"
            val <- listToMaybe $ cellContents "v" textcell
            return $ TextCell cellpos (sharedStrings xlsx !! (read (T.unpack val)))
          numericCell = do
            v <- listToMaybe $ cellContents "v" c
            return $ NumericCell cellpos (read $ T.unpack v)
          blankCell = return $ BlankCell cellpos
      -- fromJust is safe: blankCell always succeeds
      in fromJust $ msum [formulaCell, textCell, numericCell, blankCell]
  where
    cellContents tag xml = xml $.// laxElement tag &.// content
-- | Parse every cell present in the given (1-based) row of a sheet.
parseRow :: Excelx -> Cursor -> Int -> [Cell]
parseRow xlsx sheetCur rownum =
  let rowxml = sheetCur $// laxElement "row" >=> attributeIs "r" (T.pack $ show rownum)
      -- Collect the A1 addresses of every cell element in the row
      addrs = concatMap (\rx -> rx $// laxElement "c" >=> attribute "r") rowxml
  in map (parseCell xlsx sheetCur) (map A1 addrs)
-- | Highest row number present in the sheet (0 for an empty sheet).
maxRow :: Cursor -> Int
maxRow sheetxml = foldr max 0 rownums
  where
    rownums = map (read . T.unpack) (sheetxml $.// laxElement "row" >=> laxAttribute "r")
-- | Look up a sheet by name and parse its XML.
extractSheet :: T.Text -> Excelx -> Maybe Cursor
extractSheet sheetname xlsx = do
  sheetPath <- lookup sheetname $ sheetIndex xlsx
  findXMLEntry sheetPath $ archive xlsx

-- | Decode an in-memory .xlsx byte string into an 'Excelx' workbook.
toExcelx :: BS.ByteString -> Maybe Excelx
toExcelx bytes = do
  let ar = toArchive bytes
  sharedStringList <- extractSharedStrings ar
  sheetIdx <- extractSheetIndex ar
  return $ Excelx ar sharedStringList sheetIdx
-- | Read and decode an .xlsx file from disk.
openExcelx :: FilePath -> IO (Maybe Excelx)
openExcelx path = do
  contents <- BS.readFile path
  return (toExcelx contents)
-- | A workbook together with a cursor into one of its sheets.
type Sheet = (Excelx, Cursor)
-- | All the cells parsed from one row.
type Row = [Cell]
-- | A computation over a sheet that may fail.
type ExcelReader a = ReaderT (Excelx, Cursor) Maybe a
-- | Focus on the named sheet of the ambient workbook, if it exists.
sheet :: MonadReader Excelx m => T.Text -> m (Maybe (Excelx, Cursor))
sheet name = do
  wb <- ask
  return (fmap (\cur -> (wb, cur)) (extractSheet name wb))

-- | Parse the given (1-based) row of the ambient sheet.
row :: MonadReader (Excelx, Cursor) m => Int -> m [Cell]
row num = do
  (wb, cur) <- ask
  return (parseRow wb cur num)

-- | Parse every row of the ambient sheet, first to last.
rows :: MonadReader (Excelx, Cursor) m => m [Row]
rows = do
  (_, cur) <- ask
  mapM row [1 .. maxRow cur]

-- | Parse the cell at the given position of the ambient sheet.
cell :: MonadReader (Excelx, Cursor) m => Position -> m Cell
cell pos = do
  (wb, cur) <- ask
  return (parseCell wb cur pos)

-- | All cells of a column, including 'NoCell' entries for absent rows.
sparseColumn :: MonadReader (Excelx, Cursor) m => Int -> m [Cell]
sparseColumn colidx = do
  (_, cur) <- ask
  mapM (\r -> cell (R1C1 r colidx)) [1 .. maxRow cur]

-- | All present cells of a column ('NoCell' entries dropped).
column :: MonadReader (Excelx, Cursor) m => Int -> m [Cell]
column colidx = do
  cells <- sparseColumn colidx
  return (catCells cells)

-- | Run a reader computation against the given environment.
inExcel :: b -> ReaderT b m a -> m a
inExcel env action = runReaderT action env
-- | Run a sheet computation against the named sheet of the ambient
-- workbook; calls 'fail' when the sheet does not exist.
-- NOTE(review): 'fail' here needs a MonadFail-capable monad on modern
-- GHC — confirm against the project's GHC version.
inSheet :: MonadReader Excelx m => T.Text -> ReaderT (Excelx, Cursor) m b -> m b
inSheet name action = do
  maybeSheet <- sheet name
  case maybeSheet of
    Just sheetx -> runReaderT action sheetx
    Nothing -> fail "Sheet not found."

-- | Convenience wrapper: run a sheet computation against a workbook.
inExcelSheet :: Monad m => Excelx -> T.Text -> ReaderT (Excelx, Cursor) (ReaderT Excelx m) a -> m a
inExcelSheet xlsx name action = inExcel xlsx $ inSheet name action
| mebaran/hs-excelx | src/Data/Excelx.hs | bsd-3-clause | 7,488 | 0 | 24 | 1,889 | 2,639 | 1,310 | 1,329 | 175 | 2 |
module Guide.Types.Session
(
GuideData (..),
sessionUserID,
emptyGuideData,
SpockSession,
GuideSession,
sess_id,
sess_csrfToken,
sess_validUntil,
sess_data,
unwrapSession,
wrapSession,
SessionId,
)
where
import Imports
import Data.SafeCopy hiding (kind)
import Data.SafeCopy.Migrate
import Web.Spock.Internal.SessionManager (SessionId)
import Guide.Types.User
import Guide.Uid
import qualified Web.Spock.Internal.SessionManager as Spock
type SpockSession conn st = Spock.Session conn GuideData st
-- | GuideData is the session data exposed by Spock.SessionAction operations.
data GuideData = GuideData {
  -- | If logged in, must be a valid userID
  _sessionUserID :: Maybe (Uid User)
  }
  deriving (Show, Eq, Data)

deriveSafeCopySorted 0 'base ''GuideData
makeLenses ''GuideData

-- | Session data for a fresh, anonymous session (nobody logged in).
emptyGuideData :: GuideData
emptyGuideData = GuideData {
  _sessionUserID = Nothing }

-- | A serialisable copy of Spock's session record; mirrors
-- 'Web.Spock.Internal.SessionManager.Session' field for field.
data GuideSession = GuideSession {
  _sess_id :: !SessionId,
  _sess_csrfToken :: !Text,
  _sess_validUntil :: !UTCTime,
  _sess_data :: !GuideData }
  deriving (Show, Eq, Data)

deriveSafeCopySorted 0 'base ''GuideSession
makeLenses ''GuideSession
-- | Convert our serialisable session back into Spock's session record.
unwrapSession :: GuideSession -> SpockSession conn st
unwrapSession gs = Spock.Session {
  sess_id = _sess_id gs,
  sess_csrfToken = _sess_csrfToken gs,
  sess_validUntil = _sess_validUntil gs,
  sess_data = _sess_data gs
  }
-- | Capture Spock's session record into our serialisable copy.
wrapSession :: SpockSession conn st -> GuideSession
wrapSession spockSess = GuideSession {
  _sess_id = Spock.sess_id spockSess,
  _sess_csrfToken = Spock.sess_csrfToken spockSess,
  _sess_validUntil = Spock.sess_validUntil spockSess,
  _sess_data = Spock.sess_data spockSess
  }
| aelve/guide | back/src/Guide/Types/Session.hs | bsd-3-clause | 1,668 | 0 | 11 | 305 | 400 | 231 | 169 | -1 | -1 |
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
{-# LANGUAGE OverloadedStrings #-}
{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE StandaloneDeriving #-}
module Logic.Pheasant where
import Data.Map.Strict (Map)
import qualified Data.Map.Strict as Map
import Data.Set (Set)
import qualified Data.Set as Set
import Data.String
import Data.Text (Text)
import qualified Data.Text as T
import Data.Vector (Vector)
import qualified Data.Vector as V
-- | Name of a relation symbol.
newtype RelName = RelName String
  deriving (Eq, Ord, Show, IsString)

-- | Name of a function symbol.
newtype FunName = FunName String
  deriving (Eq, Ord, Show, IsString)

-- | Arity of a relation symbol.
newtype RelArity = RelArity Int
  deriving (Eq, Ord, Show)

-- | Arity of a function symbol.
newtype FunArity = FunArity Int
  deriving (Eq, Ord, Show)

-- * Languages

-- | A first-order language: its relation and function symbols,
-- each with an arity.
data Language = Language
  { languageRelations :: Map RelName RelArity
  , languageFunctions :: Map FunName FunArity
  }
  deriving (Show)

-- * Structures

-- | The extent of a relation: the set of tuples that satisfy it.
newtype RelExtent ty = RelExtent (Set (Vector ty))
  deriving (Show)

-- | The extent of a function: its graph, keyed by argument vectors.
newtype FunExtent ty = FunExtent (Map (Vector ty) ty)
  deriving (Show)

-- | A structure over a universe of objects of type @ty@.
data Structure ty = Structure
  { structureUniverse :: Set ty
  , structureRelations :: Map RelName (RelExtent ty)
  , structureFunctions :: Map FunName (FunExtent ty)
  }
  deriving (Show)

-- * Models

-- $ A structure which is compatible with a language might be taken to form a
-- model for it.

-- | A 'Model' is witness to the fact that a 'Structure' is suitable for a
-- 'Language'.
data Model ty = Model
  { modelLanguage :: Language
  , modelStructure :: Structure ty
  }
  deriving (Show)
-- | Check that structure is suitable for a language.
--
-- This means:
-- - Every relation symbol in L is in S with the same arity.
-- - Every function symbol in L is in S with the same arity.
suitable :: Language -> Structure ty -> Maybe (Model ty)
suitable l@Language{..} s@Structure{..} = do
  _ <- suitableRelations languageRelations structureRelations
  _ <- suitableFunctions languageFunctions structureFunctions
  return (Model l s)
-- | Check every relation of the structure against the language:
-- each relation must be declared in the language, and every tuple in
-- its extent must have the declared arity.  An empty extent is
-- vacuously well-formed for any arity (the previous singleton-set
-- comparison wrongly rejected empty relations).
suitableRelations
  :: Map RelName RelArity
  -> Map RelName (RelExtent ty)
  -> Maybe (Map RelName (RelExtent ty))
suitableRelations l s =
  let s' = Map.mapMaybe id (Map.intersectionWithKey fn l s)
  -- Sizes match only if every relation of the structure survived
  in if Map.size s == Map.size s'
     then Just s'
     else Nothing
  where
    fn :: RelName -> RelArity -> RelExtent ty -> Maybe (RelExtent ty)
    fn _name (RelArity a) re@(RelExtent e)
      | all (\tuple -> V.length tuple == a) (Set.toList e) = Just re
      | otherwise = Nothing
-- | Check every function of the structure against the language:
-- each function must be declared in the language, and every key of its
-- graph must have the expected length.  An empty graph is vacuously
-- well-formed (the previous singleton-set comparison wrongly rejected
-- empty functions).
-- NOTE(review): keys are compared against arity + 1, preserving the
-- original convention — confirm whether graph keys include the result.
suitableFunctions
  :: Map FunName FunArity
  -> Map FunName (FunExtent ty)
  -> Maybe (Map FunName (FunExtent ty))
suitableFunctions l s =
  let s' = Map.mapMaybe id (Map.intersectionWithKey fn l s)
  in if Map.size s == Map.size s' then Just s' else Nothing
  where
    fn :: FunName -> FunArity -> FunExtent ty -> Maybe (FunExtent ty)
    fn _name (FunArity a) fe@(FunExtent e)
      | all (\args -> V.length args == a + 1) (Map.keys e) = Just fe
      | otherwise = Nothing
-- * Formulae

-- $ First-order formulae over a language.

-- | Name of a variable.
newtype VarName = VarName String
  deriving (Eq, Ord, Show, IsString)

-- | Terms of a language.
--
-- When interpreted by a model the terms are identified with objects in the
-- universe.
data Term
  = TermVar VarName
  | TermFun FunName (Vector Term) -- Must check arity

-- | First-order formulae over terms.
-- NOTE(review): the quantifier constructors carry no variable name;
-- presumably binding is positional — confirm before use.
data Formula
  = FmlEq Term Term
  | FmlRel RelName (Vector Term) -- Must check arity
  | FmlNot Formula               -- negation
  | FmlCon Formula Formula       -- conjunction
  | FmlDis Formula Formula       -- disjunction
  | FmlA Formula                 -- universal quantification
  | FmlE Formula                 -- existential quantification
-- | Identify atomic formulae: equalities and relation applications.
atomic :: Formula -> Bool
atomic fml = case fml of
  FmlEq{}  -> True
  FmlRel{} -> True
  _        -> False
| thsutton/pheasant | lib/Logic/Pheasant.hs | bsd-3-clause | 3,795 | 0 | 14 | 905 | 1,062 | 577 | 485 | 85 | 3 |
{-# LANGUAGE FlexibleContexts, OverloadedStrings #-}
{-# LANGUAGE OverloadedLists #-}
{-# LANGUAGE NoImplicitPrelude #-}
-- | Checks the balances of accounts. Useful to keep your ledgers
-- consistent with the statements that come from the bank.
module Penny.BalanceCheck (checkBalances) where
import qualified Control.Lens as Lens
import qualified Data.Foldable as Fdbl
import qualified Data.Map as M
import qualified Data.Sequence as Seq
import qualified Rainbow
import qualified Turtle.Bytes as Bytes
import qualified Turtle.Shell as Shell
import Penny.Account
import Penny.Amount (Amount(Amount))
import Penny.Balance
import Penny.Commodity
import Penny.Copper (parseConvertProofIO)
import Penny.Copper.Copperize
import Penny.Copper.Tracompri
import Penny.Decimal
import Penny.Ents
import Penny.Polar
import Penny.Positive
import Penny.Prelude
import Penny.SeqUtil (catMaybes)
import Penny.Tranche
import Penny.Transaction
import Penny.Troika
import Penny.Unix
-- | Checks the balance of a particular account.
checkAccount
  :: Seq (Transaction a)
  -> (Account, Seq (Day, Seq (Commodity, Pole, DecPositive)))
  -> (Seq (Chunk Text), Bool)
checkAccount txns (tgtAcct, days) = (reports, cumulative)
  where
    -- Account header line, followed by the per-day reports
    reports = thisAcct <> join (fmap fst results)
    thisAcct =
      [ resultChunk cumulative
      , Rainbow.chunk $ " in account " <> pack (show tgtAcct) <> "\n"
      ]
    -- The account passes only if every requested day passes
    cumulative = all snd results
    -- Reconciled postings of the target account only (see filterAccount)
    daysAndAmts = join . fmap (filterAccount tgtAcct) $ txns
    results = fmap (checkDay daysAndAmts) days

-- | Checks the balance of a particular day, along with its
-- commodities, poles, and DecPositive.
checkDay
  :: Seq (Day, Amount)
  -- ^ Postings in this account
  -> (Day, Seq (Commodity, Pole, DecPositive))
  -- ^ Check this day
  -> (Seq (Chunk Text), Bool)
  -- ^ Report, and result.
checkDay pstgs (tgtDay, tgtBals) = (reports, cumulative)
  where
    reports = thisDay <> join (fmap fst results)
      where
        thisDay =
          [ Rainbow.chunk " "
          , resultChunk cumulative
          , Rainbow.chunk (" on day " <> pack (show tgtDay))
          , Rainbow.chunk "\n"
          ]
    cumulative = all snd results
    -- Running balance of all postings up to and including the target day
    bal = getBalanceOnDay tgtDay pstgs
    results = fmap (checkBalance bal) tgtBals
-- | Checks the balance of a particular commodity, pole, and DecPositive.
checkBalance
  :: Balance
  -> (Commodity, Pole, DecPositive)
  -> (Seq (Chunk Text), Bool)
  -- ^ A descriptive report.  Also, True if the balance is OK, False if not.
checkBalance bal (cy, tgtPole, tgtBal) = (desc, result)
  where
    -- One report line per fact: target vs actual side and balance
    desc =
      [ Rainbow.chunk " "
      , resultChunk result
      , Rainbow.chunk (" commodity: " <> cy)
      , Rainbow.chunk (" target side: " <> getSideTxt (Just tgtPole))
      , Rainbow.chunk (" target balance: " <> tgtBalTxt)
      , Rainbow.chunk
          (" actual side: " <> getSideTxt (fmap snd mayActualBalAndSide))
      , Rainbow.chunk (" actual balance: " <> actualBalanceTxt)
      , Rainbow.chunk ("\n")
      ]
    -- OK only if an actual balance exists and matches both the target
    -- magnitude and the target side
    result = case mayActualBalAndSide of
      Nothing -> False
      Just (actBal, actSide) -> cmpPositive (==) actBal tgtBal && actSide == tgtPole
    getSideTxt maySd = case maySd of
      Nothing -> "(none)"
      Just sd
        | sd == debit -> "debit"
        | otherwise -> "credit"
    tgtBalTxt = decimalText (Right Nothing) . fmap c'Integer'Positive $ tgtBal
    actualBalanceTxt = case mayActualBalAndSide of
      Nothing -> "(none)"
      Just (dec, _) -> decimalText (Right Nothing) . fmap c'Integer'Positive $ dec
    -- Balance for this commodity with the sign stripped; Nothing when
    -- the commodity is absent or its balance carries no sign
    mayActualBalAndSide = do
      balDec <- M.lookup cy . view _Wrapped' $ bal
      either (const Nothing) Just . stripDecimalSign $ balDec
-- | Green "[ OK ]" for success, red "[FAIL]" for failure.
resultChunk :: Bool -> Chunk Text
resultChunk ok = if ok
  then Rainbow.fore Rainbow.green (Rainbow.chunk "[ OK ]")
  else Rainbow.fore Rainbow.red (Rainbow.chunk "[FAIL]")
-- | Gets the balance for a set of Transaction.
getBalanceOnDay
  :: Day
  -- ^ Only get postings that are on or before this Day.
  -> Seq (Day, Amount)
  -> Balance
getBalanceOnDay dy
  = Fdbl.foldl' f mempty
  . fmap snd
  . Seq.filter ((<= dy) . fst)
  where
    -- Fold each amount into the running balance
    f bal amt = bal <> (c'Balance'Amount amt)

-- | Takes a list of Transaction and pulls only the reconciled
-- postings that are from a single account.
filterAccount
  :: Account
  -> Transaction a
  -> Seq (Day, Amount)
filterAccount acct txn
  = fmap extract . Seq.filter pd . balancedToSeqEnt . _postings $ txn
  where
    -- Pair each matching posting's amount with the transaction's date
    extract (troika, _) = (view (topLine . day) txn, Amount cy dec)
      where
        cy = view commodity troika
        dec = c'Decimal'Troika troika
    -- Keep only reconciled postings in the target account
    pd (_, postline)
      = view account postline == acct
      && reconciled postline
pureCheckBalances
:: Seq (Transaction a)
-- ^ All transactions
-> Seq (Account, Seq (Day, Seq (Commodity, Pole, DecPositive)))
-> (Seq (Chunk Text), Bool)
-- ^ Returns a report showing whether each account passed or failed.
-- Also returns True if all accounts were OK, or False if there were
-- any failures.
pureCheckBalances txns acctsAndDays = (reports, cumulative)
where
results = fmap (checkAccount txns) acctsAndDays
reports = join . fmap fst $ results
cumulative = all snd results
-- | Checks balances and gives a visual report.
loadAndCheckBalances
  :: Seq (Account, Seq (Day, Seq (Commodity, Pole, DecPositive)))
  -- ^ Accounts and balances to check
  -> Seq FilePath
  -- ^ List of filenames to load
  -> IO (Seq (Chunk Text), Bool)
loadAndCheckBalances toCheck loads = do
  neSeqs <- parseConvertProofIO loads
  -- Keep only items that are transactions; other Tracompri values
  -- (comments, prices, ...) are discarded.
  let onlyTxn = Lens.preview _Tracompri'Transaction
      txns = catMaybes (fmap onlyTxn (join neSeqs))
  return (pureCheckBalances txns toCheck)
-- | Checks balances. Sends output to @less@ with 256 colors.
checkBalances
  :: Seq (Account, Seq (Day, Seq (Commodity, Pole, DecPositive)))
  -- ^ Accounts and balances to check
  -> Seq FilePath
  -- ^ List of filenames to load
  -> IO ()
checkBalances toCheck files = do
  -- The cumulative pass/fail flag is ignored here; only the report
  -- chunks are displayed.
  (reportChunks, _allOk) <- liftIO (loadAndCheckBalances toCheck files)
  maker <- liftIO Rainbow.byteStringMakerFromEnvironment
  let rendered = Rainbow.chunksToByteStrings maker (Fdbl.toList reportChunks)
  Bytes.procs "less" lessOpts (Shell.select rendered)
| massysett/penny | penny/lib/Penny/BalanceCheck.hs | bsd-3-clause | 6,211 | 0 | 16 | 1,365 | 1,681 | 896 | 785 | 138 | 4 |
module Rules.Compile (compilePackage) where
import Hadrian.BuildPath
import Hadrian.Oracles.TextFile
import Base
import Context as C
import Expression
import Oracles.Flag (platformSupportsSharedLibs)
import Rules.Generate
import Settings
import Target
import Utilities
import qualified Text.Parsec as Parsec
-- * Rules for building objects and Haskell interface files

-- | Register the Shake rules that produce object files (@.o@, @.o-boot@)
-- and Haskell interface files (@.hi@, @.hi-boot@), in every way, for both
-- Haskell and non-Haskell sources. The @rs@ resources throttle how many
-- compile commands run concurrently.
compilePackage :: [(Resource, Int)] -> Rules ()
compilePackage rs = do
  root <- buildRootRules
  -- We match all file paths that look like:
  --   <root>/...stuffs.../build/...stuffs.../<something>.<suffix>
  --
  -- where:
  -- - the '...stuffs...' bits can be one or more path components,
  -- - the '<suffix>' part is a way prefix (e.g thr_p_, or nothing if
  --   vanilla) followed by an object file extension, without the dot
  --   (o, o-boot, hi, hi-boot),
  --
  -- and parse the information we need (stage, package path, ...) from
  -- the path and figure out the suitable way to produce that object file.
  --
  -- NOTE: 'alternatives' makes rule order significant — the first
  -- matching pattern wins — which is why the catch-all Haskell rules
  -- must come after the language-specific ones below.
  alternatives $ do
    -- Language is identified by subdirectory under /build.
    -- These are non-haskell files so only have a .o or .<way>_o suffix.
    [ root -/- "**/build/c/**/*." ++ wayPat ++ "o"
      | wayPat <- wayPats] |%> compileNonHsObject rs C

    [ root -/- "**/build/cmm/**/*." ++ wayPat ++ "o"
      | wayPat <- wayPats] |%> compileNonHsObject rs Cmm

    [ root -/- "**/build/s/**/*." ++ wayPat ++ "o"
      | wayPat <- wayPats] |%> compileNonHsObject rs Asm

    -- Uppercase .S assembly (preprocessed) is treated the same as .s here.
    [ root -/- "**/build/S/**/*." ++ wayPat ++ "o"
      | wayPat <- wayPats] |%> compileNonHsObject rs Asm

    -- All else is haskell.
    -- These come last as they overlap with the above rules' file patterns.

    -- When building dynamically we depend on the static rule if shared libs
    -- are supported, because it will add the -dynamic-too flag when
    -- compiling to build the dynamic files alongside the static files
    [ root -/- "**/build/**/*.dyn_o", root -/- "**/build/**/*.dyn_hi" ]
      &%> \ [dyn_o, _dyn_hi] -> do
        p <- platformSupportsSharedLibs
        if p
          then need [dyn_o -<.> "o", dyn_o -<.> "hi"]
          else compileHsObjectAndHi rs dyn_o

    -- One rule per (object ext, interface ext) x way-pattern combination;
    -- a single GHC invocation produces both the .o and the .hi.
    forM_ ((,) <$> hsExts <*> wayPats) $ \ ((oExt, hiExt), wayPat) ->
      [ root -/- "**/build/**/*." ++ wayPat ++ oExt
      , root -/- "**/build/**/*." ++ wayPat ++ hiExt ]
        &%> \ [o, _hi] -> compileHsObjectAndHi rs o
  where
    hsExts = [ ("o", "hi")
             , ("o-boot", "hi-boot")
             ]
    -- "" matches the vanilla way; "*_" matches any prefixed way.
    wayPats = [ "", "*_" ]
-- * Object file path types and parsers
{- We are using a non-uniform representation that separates
object files produced from Haskell code and from other
languages, because the two "groups" have to be parsed
differently enough that this would complicate the parser
significantly.
Indeed, non-Haskell files can only produce .o (or .thr_o, ...)
files while Haskell modules can produce those as well as
interface files, both in -boot or non-boot variants.
Moreover, non-Haskell object files live under:
<root>/stage<N>/<path/to/pkg>/build/{c,cmm,s}/
while Haskell object/interface files live under:
<root>/stage<N>/<path/to/pkg>/build/
So the kind of object is partially determined by
whether we're in c/, cmm/ or s/ but also by the
object file's extension, in the case of a Haskell file.
This could have been addressed with some knot-tying but
Parsec's monad doesn't give us a MonadFix instance.
We therefore stick to treating those two types of object
files non-uniformly.
-}
-- | Non Haskell source languages that we compile to get object files.
data SourceLang = Asm | C | Cmm deriving (Eq, Show)

-- | Recognise the language subdirectory name: @c@, @cmm@ or @s@.
parseSourceLang :: Parsec.Parsec String () SourceLang
parseSourceLang = Parsec.choice
  [ -- After a leading 'c', "mm" means C--; otherwise it is plain C.
    Parsec.char 'c' *> Parsec.option C (Cmm <$ Parsec.string "mm")
  , Asm <$ Parsec.char 's'
  ]
type Basename = String

-- | Consume everything up to (and including) the first dot, returning
-- the part before it.
parseBasename :: Parsec.Parsec String () Basename
parseBasename = Parsec.anyChar `Parsec.manyTill` dot
  where
    dot = Parsec.try (Parsec.char '.')
-- | > <c|cmm|s>/<file>.<way prefix>_o
data NonHsObject = NonHsObject SourceLang Basename Way
  deriving (Eq, Show)

-- | Parse the tail of a non-Haskell object path: language directory,
-- a slash, the file's basename, the way prefix, then a final @o@.
parseNonHsObject :: Parsec.Parsec String () NonHsObject
parseNonHsObject =
  NonHsObject
    <$> parseSourceLang
    <*  Parsec.char '/'
    <*> parseBasename
    <*> parseWayPrefix vanilla
    <*  Parsec.char 'o'
-- | > <o|hi|o-boot|hi-boot>
data SuffixType = O | Hi | OBoot | HiBoot deriving (Eq, Show)

-- | Parse an object/interface suffix, with or without the @-boot@ tail.
parseSuffixType :: Parsec.Parsec String () SuffixType
parseSuffixType = Parsec.choice
  [ Parsec.char 'o' *> Parsec.option O (OBoot <$ Parsec.string "-boot")
  , Parsec.string "hi" *> Parsec.option Hi (HiBoot <$ Parsec.string "-boot")
  ]
-- | > <way prefix>_<o|hi|o-boot|hi-boot>
data Extension = Extension Way SuffixType deriving (Eq, Show)

-- | Parse the way prefix followed by the suffix type.
parseExtension :: Parsec.Parsec String () Extension
parseExtension = do
  wayPrefix <- parseWayPrefix vanilla
  suffix <- parseSuffixType
  return (Extension wayPrefix suffix)
-- | > <file>.<way prefix>_<o|hi|o-boot|hi-boot>
data HsObject = HsObject Basename Extension deriving (Eq, Show)

-- | Parse a Haskell object/interface file name: basename then extension.
parseHsObject :: Parsec.Parsec String () HsObject
parseHsObject = HsObject <$> parseBasename <*> parseExtension
data Object = Hs HsObject | NonHs NonHsObject deriving (Eq, Show)

-- | Try the non-Haskell form first (it is identified by its language
-- subdirectory); fall back to a Haskell object otherwise.
parseObject :: Parsec.Parsec String () Object
parseObject =
  (NonHs <$> parseNonHsObject) Parsec.<|> (Hs <$> parseHsObject)
-- * Toplevel parsers

-- | Parser for a complete object-file path under the given build root,
-- yielding the stage, package path and decoded 'Object'.
parseBuildObject :: FilePath -> Parsec.Parsec String () (BuildPath Object)
parseBuildObject root = parseBuildPath root parseObject
-- * Getting contexts from objects

-- | Recover the build 'Context' (stage, package, way) encoded in an
-- object file's build path.
objectContext :: BuildPath Object -> Context
objectContext (BuildPath _ stage pkgPath obj) =
  Context stage pkg (objectWay obj)
  where
    pkg = unsafeFindPackageByPath pkgPath
    -- The way is stored differently for Haskell and non-Haskell objects.
    objectWay (NonHs (NonHsObject _lang _file w)) = w
    objectWay (Hs (HsObject _file (Extension w _suf))) = w
-- * Building an object

-- | Compile one Haskell module: a single GHC invocation produces both the
-- object file at @objpath@ and the matching interface file. The module's
-- source and dependencies come from the pre-computed @.dependencies@ file
-- in the context's build directory.
compileHsObjectAndHi
  :: [(Resource, Int)] -> FilePath -> Action ()
compileHsObjectAndHi rs objpath = do
  root <- buildRoot
  -- Decode stage/package/way information out of the object path itself.
  b@(BuildPath _root stage _path _o)
    <- parsePath (parseBuildObject root) "<object file path parser>" objpath
  let ctx = objectContext b
      way = C.way ctx
  ctxPath <- contextPath ctx
  (src, deps) <- lookupDependencies (ctxPath -/- ".dependencies") objpath
  need (src:deps)

  -- The .dependencies file lists indicating inputs. ghc will
  -- generally read more *.hi and *.hi-boot files (direct inputs).
  -- Allow such reads (see https://gitlab.haskell.org/ghc/ghc/wikis/Developing-Hadrian#haskell-object-files-and-hi-inputs)
  -- Note that this may allow too many *.hi and *.hi-boot files, but
  -- calculating the exact set of direct inputs is not feasible.
  trackAllow [ "**/*." ++ hisuf way
             , "**/*." ++ hibootsuf way
             ]

  buildWithResources rs $ target ctx (Ghc CompileHs stage) [src] [objpath]
-- | Compile a non-Haskell source (assembly, C or C--) into the object file
-- at @path@. The language determines the source file's extension and which
-- builder runs the compilation.
compileNonHsObject :: [(Resource, Int)] -> SourceLang -> FilePath -> Action ()
compileNonHsObject rs lang path = do
  root <- buildRoot
  -- Decode stage/package/way information out of the object path itself.
  b@(BuildPath _root stage _path _o)
    <- parsePath (parseBuildObject root) "<object file path parser>" path
  let
    ctx = objectContext b
    -- C goes through GHC acting as a C-compiler driver; assembly and
    -- C-- go through the regular compile builder.
    builder = case lang of
      C -> Ghc CompileCWithGhc
      _ -> Ghc CompileHs
  src <- case lang of
    Asm -> obj2src "S" (const False) ctx path
    C -> obj2src "c" (const False) ctx path
    -- Only C-- sources may be generated files here.
    Cmm -> obj2src "cmm" isGeneratedCmmFile ctx path
  need [src]
  -- Track #include-style dependencies via a generated .d file.
  needDependencies ctx src (path <.> "d")
  buildWithResources rs $ target ctx (builder stage) [src] [path]
-- * Helpers

-- | Discover dependencies of a given source file by iteratively calling @gcc@
-- in the @-MM -MG@ mode and building generated dependencies if they are missing
-- until reaching a fixed point.
needDependencies :: Context -> FilePath -> FilePath -> Action ()
needDependencies context@Context {..} src depFile = discover
  where
    -- One round of dependency discovery; recurses until no new
    -- generated dependencies appear, so it terminates once all
    -- generated headers exist.
    discover = do
      build $ target context (Cc FindCDependencies stage) [src] [depFile]
      deps <- parseFile depFile
      -- Generated dependencies, if not yet built, will not be found and hence
      -- will be referred to simply by their file names.
      let notFound = filter (\file -> file == takeFileName file) deps
      -- We find the full paths to generated dependencies, so we can request
      -- to build them by calling 'need'.
      todo <- catMaybes <$> mapM (fullPathIfGenerated context) notFound

      if null todo
        then need deps -- The list of dependencies is final, need all
        else do
          need todo -- Build newly discovered generated dependencies
          discover -- Continue the discovery process
-- | Read a makefile-style dependency file and return its dependency
-- list; an empty list is returned when the file does not contain
-- exactly one rule.
parseFile :: FilePath -> Action [String]
parseFile file = do
  contents <- liftIO (readFile file)
  return $ case parseMakefile contents of
    [(_target, deps)] -> deps
    _ -> []
-- | Find a given 'FilePath' in the list of generated files in the given
-- 'Context' and return its full path.
fullPathIfGenerated :: Context -> FilePath -> Action (Maybe FilePath)
fullPathIfGenerated context file = interpretInContext context $ do
  generatedFiles <- generatedDependencies
  -- Match purely on the file name; the generated list carries full paths.
  let sameName candidate = takeFileName candidate == file
  return (find sameName generatedFiles)
-- | Map an object file path back to the source file it is compiled from.
-- For a generated source (e.g. a generated .cmm file) the computed path is
-- returned as-is; otherwise the build-directory prefix is stripped and the
-- package's source directory is prepended.
obj2src :: String -> (FilePath -> Bool) -> Context -> FilePath -> Action FilePath
obj2src extension isGenerated context@Context {..} obj
  | isGenerated src = return src
  | otherwise = (pkgPath package ++) <$> suffix
  where
    src = obj -<.> extension
    suffix = do
      path <- buildPath context
      -- BUG FIX: previously the error message string itself was used as
      -- the fallback FilePath, silently yielding a nonsense source path
      -- when the prefix did not match. Fail loudly instead.
      return $ fromMaybe (error $ "Cannot determine source for " ++ obj)
             $ stripPrefix (path -/- extension) src
| sdiehl/ghc | hadrian/src/Rules/Compile.hs | bsd-3-clause | 9,882 | 0 | 17 | 2,313 | 2,130 | 1,095 | 1,035 | -1 | -1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.